author     Kevin Hilman <khilman@linaro.org>    2015-06-25 00:32:26 -0400
committer  Kevin Hilman <khilman@linaro.org>    2015-06-25 00:32:26 -0400
commit     32270e805a1e0baf39aa040177ef1896f03b7266
tree       89f8ac72b98d4f37377fa58f3fa9804dd32f2350
parent     39e79b873e1bafc5637a1a704495b01edbe469b1
parent     4af34b572a85c44c55491a10693535a79627c478
Merge tag 'armsoc-drivers' into test-merge
ARM: SoC: driver updates for v4.2
Some of these are for drivers/soc, where we're now putting
SoC-specific drivers these days. Some are for other driver subsystems
where we have received acks from the appropriate maintainers.
Some highlights:
- simple-mfd: document DT bindings and misc updates
- migrate mach-berlin to simple-mfd for clock, pinctrl and reset
- memory: support for Tegra132 SoC
- memory: introduce tegra EMC driver for scaling memory frequency
- misc. updates for ARM CCI and CCN busses
Conflicts:
arch/arm64/boot/dts/arm/juno-motherboard.dtsi
Trivial add/add conflict with our dt branch.
Resolution: take both sides.
# gpg: Signature made Wed Jun 24 21:32:17 2015 PDT using RSA key ID D3FBC665
# gpg: Good signature from "Kevin Hilman <khilman@deeprootsystems.com>"
# gpg: aka "Kevin Hilman <khilman@linaro.org>"
# gpg: aka "Kevin Hilman <khilman@kernel.org>"
# Conflicts:
# arch/arm64/boot/dts/arm/juno-motherboard.dtsi
57 files changed, 4966 insertions, 1067 deletions
diff --git a/Documentation/arm/CCN.txt b/Documentation/arm/CCN.txt
index 0632b3aad83e..ffca443a19b4 100644
--- a/Documentation/arm/CCN.txt
+++ b/Documentation/arm/CCN.txt
@@ -33,20 +33,23 @@ directory, with first 8 configurable by user and additional | |||
33 | Cycle counter is described by a "type" value 0xff and does | 33 | Cycle counter is described by a "type" value 0xff and does |
34 | not require any other settings. | 34 | not require any other settings. |
35 | 35 | ||
36 | The driver also provides a "cpumask" sysfs attribute, which contains | ||
37 | a single CPU ID, of the processor which will be used to handle all | ||
38 | the CCN PMU events. It is recommended that the user space tools | ||
39 | request the events on this processor (if not, the perf_event->cpu value | ||
40 | will be overwritten anyway). In case of this processor being offlined, | ||
41 | the events are migrated to another one and the attribute is updated. | ||
42 | |||
36 | Example of perf tool use: | 43 | Example of perf tool use: |
37 | 44 | ||
38 | / # perf list | grep ccn | 45 | / # perf list | grep ccn |
39 | ccn/cycles/ [Kernel PMU event] | 46 | ccn/cycles/ [Kernel PMU event] |
40 | <...> | 47 | <...> |
41 | ccn/xp_valid_flit/ [Kernel PMU event] | 48 | ccn/xp_valid_flit,xp=?,port=?,vc=?,dir=?/ [Kernel PMU event] |
42 | <...> | 49 | <...> |
43 | 50 | ||
44 | / # perf stat -C 0 -e ccn/cycles/,ccn/xp_valid_flit,xp=1,port=0,vc=1,dir=1/ \ | 51 | / # perf stat -a -e ccn/cycles/,ccn/xp_valid_flit,xp=1,port=0,vc=1,dir=1/ \ |
45 | sleep 1 | 52 | sleep 1 |
46 | 53 | ||
47 | The driver does not support sampling, therefore "perf record" will | 54 | The driver does not support sampling, therefore "perf record" will |
48 | not work. Also notice that only single cpu is being selected | 55 | not work. Per-task (without "-a") perf sessions are not supported. |
49 | ("-C 0") - this is because perf framework does not support | ||
50 | "non-CPU related" counters (yet?) so system-wide session ("-a") | ||
51 | would try (and in most cases fail) to set up the same event | ||
52 | per each CPU. | ||
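
[Illustration only, not part of this patch.] The new "cpumask" attribute described above can be read from sysfs before starting a session, so that the counting CPU is known in advance. The "ccn" directory name below is assumed from the event names in the example and may differ on a given system; the usual sysfs location for PMU attributes is shown:

    / # cat /sys/bus/event_source/devices/ccn/cpumask
    / # perf stat -a -e ccn/cycles/,ccn/xp_valid_flit,xp=1,port=0,vc=1,dir=1/ sleep 1

Newer perf tools consult this attribute themselves; reading it manually is mainly useful for debugging which CPU ends up servicing the CCN interrupts.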
diff --git a/Documentation/devicetree/bindings/arm/cci.txt b/Documentation/devicetree/bindings/arm/cci.txt
index 3c5c631328d3..aef1d200a9b2 100644
--- a/Documentation/devicetree/bindings/arm/cci.txt
+++ b/Documentation/devicetree/bindings/arm/cci.txt
@@ -31,8 +31,9 @@ specific to ARM. | |||
31 | - compatible | 31 | - compatible |
32 | Usage: required | 32 | Usage: required |
33 | Value type: <string> | 33 | Value type: <string> |
34 | Definition: must be set to | 34 | Definition: must contain one of the following: |
35 | "arm,cci-400" | 35 | "arm,cci-400" |
36 | "arm,cci-500" | ||
36 | 37 | ||
37 | - reg | 38 | - reg |
38 | Usage: required | 39 | Usage: required |
@@ -99,6 +100,7 @@ specific to ARM. | |||
99 | "arm,cci-400-pmu,r1" | 100 | "arm,cci-400-pmu,r1" |
100 | "arm,cci-400-pmu" - DEPRECATED, permitted only where OS has | 101 | "arm,cci-400-pmu" - DEPRECATED, permitted only where OS has |
101 | secure access to CCI registers | 102 | secure access to CCI registers |
103 | "arm,cci-500-pmu,r0" | ||
102 | - reg: | 104 | - reg: |
103 | Usage: required | 105 | Usage: required |
104 | Value type: Integer cells. A register entry, expressed | 106 | Value type: Integer cells. A register entry, expressed |
diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra-mc.txt b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra-mc.txt
index f3db93c85eea..3338a2834ad7 100644
--- a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra-mc.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra-mc.txt
@@ -1,6 +1,9 @@ | |||
1 | NVIDIA Tegra Memory Controller device tree bindings | 1 | NVIDIA Tegra Memory Controller device tree bindings |
2 | =================================================== | 2 | =================================================== |
3 | 3 | ||
4 | memory-controller node | ||
5 | ---------------------- | ||
6 | |||
4 | Required properties: | 7 | Required properties: |
5 | - compatible: Should be "nvidia,tegra<chip>-mc" | 8 | - compatible: Should be "nvidia,tegra<chip>-mc" |
6 | - reg: Physical base address and length of the controller's registers. | 9 | - reg: Physical base address and length of the controller's registers. |
@@ -15,9 +18,49 @@ Required properties: | |||
15 | This device implements an IOMMU that complies with the generic IOMMU binding. | 18 | This device implements an IOMMU that complies with the generic IOMMU binding. |
16 | See ../iommu/iommu.txt for details. | 19 | See ../iommu/iommu.txt for details. |
17 | 20 | ||
18 | Example: | 21 | emc-timings subnode |
19 | -------- | 22 | ------------------- |
23 | |||
24 | The node should contain a "emc-timings" subnode for each supported RAM type (see field RAM_CODE in | ||
25 | register PMC_STRAPPING_OPT_A). | ||
26 | |||
27 | Required properties for "emc-timings" nodes : | ||
28 | - nvidia,ram-code : Should contain the value of RAM_CODE this timing set is used for. | ||
29 | |||
30 | timing subnode | ||
31 | -------------- | ||
32 | |||
33 | Each "emc-timings" node should contain a subnode for every supported EMC clock rate. | ||
34 | |||
35 | Required properties for timing nodes : | ||
36 | - clock-frequency : Should contain the memory clock rate in Hz. | ||
37 | - nvidia,emem-configuration : Values to be written to the EMEM register block. For the Tegra124 SoC | ||
38 | (see section "15.6.1 MC Registers" in the TRM), these are the registers whose values need to be | ||
39 | specified, according to the board documentation: | ||
40 | |||
41 | MC_EMEM_ARB_CFG | ||
42 | MC_EMEM_ARB_OUTSTANDING_REQ | ||
43 | MC_EMEM_ARB_TIMING_RCD | ||
44 | MC_EMEM_ARB_TIMING_RP | ||
45 | MC_EMEM_ARB_TIMING_RC | ||
46 | MC_EMEM_ARB_TIMING_RAS | ||
47 | MC_EMEM_ARB_TIMING_FAW | ||
48 | MC_EMEM_ARB_TIMING_RRD | ||
49 | MC_EMEM_ARB_TIMING_RAP2PRE | ||
50 | MC_EMEM_ARB_TIMING_WAP2PRE | ||
51 | MC_EMEM_ARB_TIMING_R2R | ||
52 | MC_EMEM_ARB_TIMING_W2W | ||
53 | MC_EMEM_ARB_TIMING_R2W | ||
54 | MC_EMEM_ARB_TIMING_W2R | ||
55 | MC_EMEM_ARB_DA_TURNS | ||
56 | MC_EMEM_ARB_DA_COVERS | ||
57 | MC_EMEM_ARB_MISC0 | ||
58 | MC_EMEM_ARB_MISC1 | ||
59 | MC_EMEM_ARB_RING1_THROTTLE | ||
20 | 60 | ||
61 | Example SoC include file: | ||
62 | |||
63 | / { | ||
21 | mc: memory-controller@0,70019000 { | 64 | mc: memory-controller@0,70019000 { |
22 | compatible = "nvidia,tegra124-mc"; | 65 | compatible = "nvidia,tegra124-mc"; |
23 | reg = <0x0 0x70019000 0x0 0x1000>; | 66 | reg = <0x0 0x70019000 0x0 0x1000>; |
@@ -34,3 +77,40 @@ Example: | |||
34 | ... | 77 | ... |
35 | iommus = <&mc TEGRA_SWGROUP_SDMMC1A>; | 78 | iommus = <&mc TEGRA_SWGROUP_SDMMC1A>; |
36 | }; | 79 | }; |
80 | }; | ||
81 | |||
82 | Example board file: | ||
83 | |||
84 | / { | ||
85 | memory-controller@0,70019000 { | ||
86 | emc-timings-3 { | ||
87 | nvidia,ram-code = <3>; | ||
88 | |||
89 | timing-12750000 { | ||
90 | clock-frequency = <12750000>; | ||
91 | |||
92 | nvidia,emem-configuration = < | ||
93 | 0x40040001 /* MC_EMEM_ARB_CFG */ | ||
94 | 0x8000000a /* MC_EMEM_ARB_OUTSTANDING_REQ */ | ||
95 | 0x00000001 /* MC_EMEM_ARB_TIMING_RCD */ | ||
96 | 0x00000001 /* MC_EMEM_ARB_TIMING_RP */ | ||
97 | 0x00000002 /* MC_EMEM_ARB_TIMING_RC */ | ||
98 | 0x00000000 /* MC_EMEM_ARB_TIMING_RAS */ | ||
99 | 0x00000002 /* MC_EMEM_ARB_TIMING_FAW */ | ||
100 | 0x00000001 /* MC_EMEM_ARB_TIMING_RRD */ | ||
101 | 0x00000002 /* MC_EMEM_ARB_TIMING_RAP2PRE */ | ||
102 | 0x00000008 /* MC_EMEM_ARB_TIMING_WAP2PRE */ | ||
103 | 0x00000003 /* MC_EMEM_ARB_TIMING_R2R */ | ||
104 | 0x00000002 /* MC_EMEM_ARB_TIMING_W2W */ | ||
105 | 0x00000003 /* MC_EMEM_ARB_TIMING_R2W */ | ||
106 | 0x00000006 /* MC_EMEM_ARB_TIMING_W2R */ | ||
107 | 0x06030203 /* MC_EMEM_ARB_DA_TURNS */ | ||
108 | 0x000a0402 /* MC_EMEM_ARB_DA_COVERS */ | ||
109 | 0x77e30303 /* MC_EMEM_ARB_MISC0 */ | ||
110 | 0x70000f03 /* MC_EMEM_ARB_MISC1 */ | ||
111 | 0x001f0000 /* MC_EMEM_ARB_RING1_THROTTLE */ | ||
112 | >; | ||
113 | }; | ||
114 | }; | ||
115 | }; | ||
116 | }; | ||
diff --git a/Documentation/devicetree/bindings/memory-controllers/tegra-emc.txt b/Documentation/devicetree/bindings/memory-controllers/tegra-emc.txt
new file mode 100644
index 000000000000..b59c625d6336
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/tegra-emc.txt
@@ -0,0 +1,374 @@ | |||
1 | NVIDIA Tegra124 SoC EMC (external memory controller) | ||
2 | ==================================================== | ||
3 | |||
4 | Required properties : | ||
5 | - compatible : Should be "nvidia,tegra124-emc". | ||
6 | - reg : physical base address and length of the controller's registers. | ||
7 | - nvidia,memory-controller : phandle of the MC driver. | ||
8 | |||
9 | The node should contain a "emc-timings" subnode for each supported RAM type | ||
10 | (see field RAM_CODE in register PMC_STRAPPING_OPT_A), with its unit address | ||
11 | being its RAM_CODE. | ||
12 | |||
13 | Required properties for "emc-timings" nodes : | ||
14 | - nvidia,ram-code : Should contain the value of RAM_CODE this timing set is | ||
15 | used for. | ||
16 | |||
17 | Each "emc-timings" node should contain a "timing" subnode for every supported | ||
18 | EMC clock rate. The "timing" subnodes should have the clock rate in Hz as | ||
19 | their unit address. | ||
20 | |||
21 | Required properties for "timing" nodes : | ||
22 | - clock-frequency : Should contain the memory clock rate in Hz. | ||
23 | - The following properties contain EMC timing characterization values | ||
24 | (specified in the board documentation) : | ||
25 | - nvidia,emc-auto-cal-config : EMC_AUTO_CAL_CONFIG | ||
26 | - nvidia,emc-auto-cal-config2 : EMC_AUTO_CAL_CONFIG2 | ||
27 | - nvidia,emc-auto-cal-config3 : EMC_AUTO_CAL_CONFIG3 | ||
28 | - nvidia,emc-auto-cal-interval : EMC_AUTO_CAL_INTERVAL | ||
29 | - nvidia,emc-bgbias-ctl0 : EMC_BGBIAS_CTL0 | ||
30 | - nvidia,emc-cfg : EMC_CFG | ||
31 | - nvidia,emc-cfg-2 : EMC_CFG_2 | ||
32 | - nvidia,emc-ctt-term-ctrl : EMC_CTT_TERM_CTRL | ||
33 | - nvidia,emc-mode-1 : Mode Register 1 | ||
34 | - nvidia,emc-mode-2 : Mode Register 2 | ||
35 | - nvidia,emc-mode-4 : Mode Register 4 | ||
36 | - nvidia,emc-mode-reset : Mode Register 0 | ||
37 | - nvidia,emc-mrs-wait-cnt : EMC_MRS_WAIT_CNT | ||
38 | - nvidia,emc-sel-dpd-ctrl : EMC_SEL_DPD_CTRL | ||
39 | - nvidia,emc-xm2dqspadctrl2 : EMC_XM2DQSPADCTRL2 | ||
40 | - nvidia,emc-zcal-cnt-long : EMC_ZCAL_WAIT_CNT after clock change | ||
41 | - nvidia,emc-zcal-interval : EMC_ZCAL_INTERVAL | ||
42 | - nvidia,emc-configuration : EMC timing characterization data. These are the | ||
43 | registers (see section "15.6.2 EMC Registers" in the TRM) whose values need to | ||
44 | be specified, according to the board documentation: | ||
45 | |||
46 | EMC_RC | ||
47 | EMC_RFC | ||
48 | EMC_RFC_SLR | ||
49 | EMC_RAS | ||
50 | EMC_RP | ||
51 | EMC_R2W | ||
52 | EMC_W2R | ||
53 | EMC_R2P | ||
54 | EMC_W2P | ||
55 | EMC_RD_RCD | ||
56 | EMC_WR_RCD | ||
57 | EMC_RRD | ||
58 | EMC_REXT | ||
59 | EMC_WEXT | ||
60 | EMC_WDV | ||
61 | EMC_WDV_MASK | ||
62 | EMC_QUSE | ||
63 | EMC_QUSE_WIDTH | ||
64 | EMC_IBDLY | ||
65 | EMC_EINPUT | ||
66 | EMC_EINPUT_DURATION | ||
67 | EMC_PUTERM_EXTRA | ||
68 | EMC_PUTERM_WIDTH | ||
69 | EMC_PUTERM_ADJ | ||
70 | EMC_CDB_CNTL_1 | ||
71 | EMC_CDB_CNTL_2 | ||
72 | EMC_CDB_CNTL_3 | ||
73 | EMC_QRST | ||
74 | EMC_QSAFE | ||
75 | EMC_RDV | ||
76 | EMC_RDV_MASK | ||
77 | EMC_REFRESH | ||
78 | EMC_BURST_REFRESH_NUM | ||
79 | EMC_PRE_REFRESH_REQ_CNT | ||
80 | EMC_PDEX2WR | ||
81 | EMC_PDEX2RD | ||
82 | EMC_PCHG2PDEN | ||
83 | EMC_ACT2PDEN | ||
84 | EMC_AR2PDEN | ||
85 | EMC_RW2PDEN | ||
86 | EMC_TXSR | ||
87 | EMC_TXSRDLL | ||
88 | EMC_TCKE | ||
89 | EMC_TCKESR | ||
90 | EMC_TPD | ||
91 | EMC_TFAW | ||
92 | EMC_TRPAB | ||
93 | EMC_TCLKSTABLE | ||
94 | EMC_TCLKSTOP | ||
95 | EMC_TREFBW | ||
96 | EMC_FBIO_CFG6 | ||
97 | EMC_ODT_WRITE | ||
98 | EMC_ODT_READ | ||
99 | EMC_FBIO_CFG5 | ||
100 | EMC_CFG_DIG_DLL | ||
101 | EMC_CFG_DIG_DLL_PERIOD | ||
102 | EMC_DLL_XFORM_DQS0 | ||
103 | EMC_DLL_XFORM_DQS1 | ||
104 | EMC_DLL_XFORM_DQS2 | ||
105 | EMC_DLL_XFORM_DQS3 | ||
106 | EMC_DLL_XFORM_DQS4 | ||
107 | EMC_DLL_XFORM_DQS5 | ||
108 | EMC_DLL_XFORM_DQS6 | ||
109 | EMC_DLL_XFORM_DQS7 | ||
110 | EMC_DLL_XFORM_DQS8 | ||
111 | EMC_DLL_XFORM_DQS9 | ||
112 | EMC_DLL_XFORM_DQS10 | ||
113 | EMC_DLL_XFORM_DQS11 | ||
114 | EMC_DLL_XFORM_DQS12 | ||
115 | EMC_DLL_XFORM_DQS13 | ||
116 | EMC_DLL_XFORM_DQS14 | ||
117 | EMC_DLL_XFORM_DQS15 | ||
118 | EMC_DLL_XFORM_QUSE0 | ||
119 | EMC_DLL_XFORM_QUSE1 | ||
120 | EMC_DLL_XFORM_QUSE2 | ||
121 | EMC_DLL_XFORM_QUSE3 | ||
122 | EMC_DLL_XFORM_QUSE4 | ||
123 | EMC_DLL_XFORM_QUSE5 | ||
124 | EMC_DLL_XFORM_QUSE6 | ||
125 | EMC_DLL_XFORM_QUSE7 | ||
126 | EMC_DLL_XFORM_ADDR0 | ||
127 | EMC_DLL_XFORM_ADDR1 | ||
128 | EMC_DLL_XFORM_ADDR2 | ||
129 | EMC_DLL_XFORM_ADDR3 | ||
130 | EMC_DLL_XFORM_ADDR4 | ||
131 | EMC_DLL_XFORM_ADDR5 | ||
132 | EMC_DLL_XFORM_QUSE8 | ||
133 | EMC_DLL_XFORM_QUSE9 | ||
134 | EMC_DLL_XFORM_QUSE10 | ||
135 | EMC_DLL_XFORM_QUSE11 | ||
136 | EMC_DLL_XFORM_QUSE12 | ||
137 | EMC_DLL_XFORM_QUSE13 | ||
138 | EMC_DLL_XFORM_QUSE14 | ||
139 | EMC_DLL_XFORM_QUSE15 | ||
140 | EMC_DLI_TRIM_TXDQS0 | ||
141 | EMC_DLI_TRIM_TXDQS1 | ||
142 | EMC_DLI_TRIM_TXDQS2 | ||
143 | EMC_DLI_TRIM_TXDQS3 | ||
144 | EMC_DLI_TRIM_TXDQS4 | ||
145 | EMC_DLI_TRIM_TXDQS5 | ||
146 | EMC_DLI_TRIM_TXDQS6 | ||
147 | EMC_DLI_TRIM_TXDQS7 | ||
148 | EMC_DLI_TRIM_TXDQS8 | ||
149 | EMC_DLI_TRIM_TXDQS9 | ||
150 | EMC_DLI_TRIM_TXDQS10 | ||
151 | EMC_DLI_TRIM_TXDQS11 | ||
152 | EMC_DLI_TRIM_TXDQS12 | ||
153 | EMC_DLI_TRIM_TXDQS13 | ||
154 | EMC_DLI_TRIM_TXDQS14 | ||
155 | EMC_DLI_TRIM_TXDQS15 | ||
156 | EMC_DLL_XFORM_DQ0 | ||
157 | EMC_DLL_XFORM_DQ1 | ||
158 | EMC_DLL_XFORM_DQ2 | ||
159 | EMC_DLL_XFORM_DQ3 | ||
160 | EMC_DLL_XFORM_DQ4 | ||
161 | EMC_DLL_XFORM_DQ5 | ||
162 | EMC_DLL_XFORM_DQ6 | ||
163 | EMC_DLL_XFORM_DQ7 | ||
164 | EMC_XM2CMDPADCTRL | ||
165 | EMC_XM2CMDPADCTRL4 | ||
166 | EMC_XM2CMDPADCTRL5 | ||
167 | EMC_XM2DQPADCTRL2 | ||
168 | EMC_XM2DQPADCTRL3 | ||
169 | EMC_XM2CLKPADCTRL | ||
170 | EMC_XM2CLKPADCTRL2 | ||
171 | EMC_XM2COMPPADCTRL | ||
172 | EMC_XM2VTTGENPADCTRL | ||
173 | EMC_XM2VTTGENPADCTRL2 | ||
174 | EMC_XM2VTTGENPADCTRL3 | ||
175 | EMC_XM2DQSPADCTRL3 | ||
176 | EMC_XM2DQSPADCTRL4 | ||
177 | EMC_XM2DQSPADCTRL5 | ||
178 | EMC_XM2DQSPADCTRL6 | ||
179 | EMC_DSR_VTTGEN_DRV | ||
180 | EMC_TXDSRVTTGEN | ||
181 | EMC_FBIO_SPARE | ||
182 | EMC_ZCAL_WAIT_CNT | ||
183 | EMC_MRS_WAIT_CNT2 | ||
184 | EMC_CTT | ||
185 | EMC_CTT_DURATION | ||
186 | EMC_CFG_PIPE | ||
187 | EMC_DYN_SELF_REF_CONTROL | ||
188 | EMC_QPOP | ||
189 | |||
190 | Example SoC include file: | ||
191 | |||
192 | / { | ||
193 | emc@0,7001b000 { | ||
194 | compatible = "nvidia,tegra124-emc"; | ||
195 | reg = <0x0 0x7001b000 0x0 0x1000>; | ||
196 | |||
197 | nvidia,memory-controller = <&mc>; | ||
198 | }; | ||
199 | }; | ||
200 | |||
201 | Example board file: | ||
202 | |||
203 | / { | ||
204 | emc@0,7001b000 { | ||
205 | emc-timings-3 { | ||
206 | nvidia,ram-code = <3>; | ||
207 | |||
208 | timing-12750000 { | ||
209 | clock-frequency = <12750000>; | ||
210 | |||
211 | nvidia,emc-zcal-cnt-long = <0x00000042>; | ||
212 | nvidia,emc-auto-cal-interval = <0x001fffff>; | ||
213 | nvidia,emc-ctt-term-ctrl = <0x00000802>; | ||
214 | nvidia,emc-cfg = <0x73240000>; | ||
215 | nvidia,emc-cfg-2 = <0x000008c5>; | ||
216 | nvidia,emc-sel-dpd-ctrl = <0x00040128>; | ||
217 | nvidia,emc-bgbias-ctl0 = <0x00000008>; | ||
218 | nvidia,emc-auto-cal-config = <0xa1430000>; | ||
219 | nvidia,emc-auto-cal-config2 = <0x00000000>; | ||
220 | nvidia,emc-auto-cal-config3 = <0x00000000>; | ||
221 | nvidia,emc-mode-reset = <0x80001221>; | ||
222 | nvidia,emc-mode-1 = <0x80100003>; | ||
223 | nvidia,emc-mode-2 = <0x80200008>; | ||
224 | nvidia,emc-mode-4 = <0x00000000>; | ||
225 | |||
226 | nvidia,emc-configuration = < | ||
227 | 0x00000000 /* EMC_RC */ | ||
228 | 0x00000003 /* EMC_RFC */ | ||
229 | 0x00000000 /* EMC_RFC_SLR */ | ||
230 | 0x00000000 /* EMC_RAS */ | ||
231 | 0x00000000 /* EMC_RP */ | ||
232 | 0x00000004 /* EMC_R2W */ | ||
233 | 0x0000000a /* EMC_W2R */ | ||
234 | 0x00000003 /* EMC_R2P */ | ||
235 | 0x0000000b /* EMC_W2P */ | ||
236 | 0x00000000 /* EMC_RD_RCD */ | ||
237 | 0x00000000 /* EMC_WR_RCD */ | ||
238 | 0x00000003 /* EMC_RRD */ | ||
239 | 0x00000003 /* EMC_REXT */ | ||
240 | 0x00000000 /* EMC_WEXT */ | ||
241 | 0x00000006 /* EMC_WDV */ | ||
242 | 0x00000006 /* EMC_WDV_MASK */ | ||
243 | 0x00000006 /* EMC_QUSE */ | ||
244 | 0x00000002 /* EMC_QUSE_WIDTH */ | ||
245 | 0x00000000 /* EMC_IBDLY */ | ||
246 | 0x00000005 /* EMC_EINPUT */ | ||
247 | 0x00000005 /* EMC_EINPUT_DURATION */ | ||
248 | 0x00010000 /* EMC_PUTERM_EXTRA */ | ||
249 | 0x00000003 /* EMC_PUTERM_WIDTH */ | ||
250 | 0x00000000 /* EMC_PUTERM_ADJ */ | ||
251 | 0x00000000 /* EMC_CDB_CNTL_1 */ | ||
252 | 0x00000000 /* EMC_CDB_CNTL_2 */ | ||
253 | 0x00000000 /* EMC_CDB_CNTL_3 */ | ||
254 | 0x00000004 /* EMC_QRST */ | ||
255 | 0x0000000c /* EMC_QSAFE */ | ||
256 | 0x0000000d /* EMC_RDV */ | ||
257 | 0x0000000f /* EMC_RDV_MASK */ | ||
258 | 0x00000060 /* EMC_REFRESH */ | ||
259 | 0x00000000 /* EMC_BURST_REFRESH_NUM */ | ||
260 | 0x00000018 /* EMC_PRE_REFRESH_REQ_CNT */ | ||
261 | 0x00000002 /* EMC_PDEX2WR */ | ||
262 | 0x00000002 /* EMC_PDEX2RD */ | ||
263 | 0x00000001 /* EMC_PCHG2PDEN */ | ||
264 | 0x00000000 /* EMC_ACT2PDEN */ | ||
265 | 0x00000007 /* EMC_AR2PDEN */ | ||
266 | 0x0000000f /* EMC_RW2PDEN */ | ||
267 | 0x00000005 /* EMC_TXSR */ | ||
268 | 0x00000005 /* EMC_TXSRDLL */ | ||
269 | 0x00000004 /* EMC_TCKE */ | ||
270 | 0x00000005 /* EMC_TCKESR */ | ||
271 | 0x00000004 /* EMC_TPD */ | ||
272 | 0x00000000 /* EMC_TFAW */ | ||
273 | 0x00000000 /* EMC_TRPAB */ | ||
274 | 0x00000005 /* EMC_TCLKSTABLE */ | ||
275 | 0x00000005 /* EMC_TCLKSTOP */ | ||
276 | 0x00000064 /* EMC_TREFBW */ | ||
277 | 0x00000000 /* EMC_FBIO_CFG6 */ | ||
278 | 0x00000000 /* EMC_ODT_WRITE */ | ||
279 | 0x00000000 /* EMC_ODT_READ */ | ||
280 | 0x106aa298 /* EMC_FBIO_CFG5 */ | ||
281 | 0x002c00a0 /* EMC_CFG_DIG_DLL */ | ||
282 | 0x00008000 /* EMC_CFG_DIG_DLL_PERIOD */ | ||
283 | 0x00064000 /* EMC_DLL_XFORM_DQS0 */ | ||
284 | 0x00064000 /* EMC_DLL_XFORM_DQS1 */ | ||
285 | 0x00064000 /* EMC_DLL_XFORM_DQS2 */ | ||
286 | 0x00064000 /* EMC_DLL_XFORM_DQS3 */ | ||
287 | 0x00064000 /* EMC_DLL_XFORM_DQS4 */ | ||
288 | 0x00064000 /* EMC_DLL_XFORM_DQS5 */ | ||
289 | 0x00064000 /* EMC_DLL_XFORM_DQS6 */ | ||
290 | 0x00064000 /* EMC_DLL_XFORM_DQS7 */ | ||
291 | 0x00064000 /* EMC_DLL_XFORM_DQS8 */ | ||
292 | 0x00064000 /* EMC_DLL_XFORM_DQS9 */ | ||
293 | 0x00064000 /* EMC_DLL_XFORM_DQS10 */ | ||
294 | 0x00064000 /* EMC_DLL_XFORM_DQS11 */ | ||
295 | 0x00064000 /* EMC_DLL_XFORM_DQS12 */ | ||
296 | 0x00064000 /* EMC_DLL_XFORM_DQS13 */ | ||
297 | 0x00064000 /* EMC_DLL_XFORM_DQS14 */ | ||
298 | 0x00064000 /* EMC_DLL_XFORM_DQS15 */ | ||
299 | 0x00000000 /* EMC_DLL_XFORM_QUSE0 */ | ||
300 | 0x00000000 /* EMC_DLL_XFORM_QUSE1 */ | ||
301 | 0x00000000 /* EMC_DLL_XFORM_QUSE2 */ | ||
302 | 0x00000000 /* EMC_DLL_XFORM_QUSE3 */ | ||
303 | 0x00000000 /* EMC_DLL_XFORM_QUSE4 */ | ||
304 | 0x00000000 /* EMC_DLL_XFORM_QUSE5 */ | ||
305 | 0x00000000 /* EMC_DLL_XFORM_QUSE6 */ | ||
306 | 0x00000000 /* EMC_DLL_XFORM_QUSE7 */ | ||
307 | 0x00000000 /* EMC_DLL_XFORM_ADDR0 */ | ||
308 | 0x00000000 /* EMC_DLL_XFORM_ADDR1 */ | ||
309 | 0x00000000 /* EMC_DLL_XFORM_ADDR2 */ | ||
310 | 0x00000000 /* EMC_DLL_XFORM_ADDR3 */ | ||
311 | 0x00000000 /* EMC_DLL_XFORM_ADDR4 */ | ||
312 | 0x00000000 /* EMC_DLL_XFORM_ADDR5 */ | ||
313 | 0x00000000 /* EMC_DLL_XFORM_QUSE8 */ | ||
314 | 0x00000000 /* EMC_DLL_XFORM_QUSE9 */ | ||
315 | 0x00000000 /* EMC_DLL_XFORM_QUSE10 */ | ||
316 | 0x00000000 /* EMC_DLL_XFORM_QUSE11 */ | ||
317 | 0x00000000 /* EMC_DLL_XFORM_QUSE12 */ | ||
318 | 0x00000000 /* EMC_DLL_XFORM_QUSE13 */ | ||
319 | 0x00000000 /* EMC_DLL_XFORM_QUSE14 */ | ||
320 | 0x00000000 /* EMC_DLL_XFORM_QUSE15 */ | ||
321 | 0x00000000 /* EMC_DLI_TRIM_TXDQS0 */ | ||
322 | 0x00000000 /* EMC_DLI_TRIM_TXDQS1 */ | ||
323 | 0x00000000 /* EMC_DLI_TRIM_TXDQS2 */ | ||
324 | 0x00000000 /* EMC_DLI_TRIM_TXDQS3 */ | ||
325 | 0x00000000 /* EMC_DLI_TRIM_TXDQS4 */ | ||
326 | 0x00000000 /* EMC_DLI_TRIM_TXDQS5 */ | ||
327 | 0x00000000 /* EMC_DLI_TRIM_TXDQS6 */ | ||
328 | 0x00000000 /* EMC_DLI_TRIM_TXDQS7 */ | ||
329 | 0x00000000 /* EMC_DLI_TRIM_TXDQS8 */ | ||
330 | 0x00000000 /* EMC_DLI_TRIM_TXDQS9 */ | ||
331 | 0x00000000 /* EMC_DLI_TRIM_TXDQS10 */ | ||
332 | 0x00000000 /* EMC_DLI_TRIM_TXDQS11 */ | ||
333 | 0x00000000 /* EMC_DLI_TRIM_TXDQS12 */ | ||
334 | 0x00000000 /* EMC_DLI_TRIM_TXDQS13 */ | ||
335 | 0x00000000 /* EMC_DLI_TRIM_TXDQS14 */ | ||
336 | 0x00000000 /* EMC_DLI_TRIM_TXDQS15 */ | ||
337 | 0x000fc000 /* EMC_DLL_XFORM_DQ0 */ | ||
338 | 0x000fc000 /* EMC_DLL_XFORM_DQ1 */ | ||
339 | 0x000fc000 /* EMC_DLL_XFORM_DQ2 */ | ||
340 | 0x000fc000 /* EMC_DLL_XFORM_DQ3 */ | ||
341 | 0x0000fc00 /* EMC_DLL_XFORM_DQ4 */ | ||
342 | 0x0000fc00 /* EMC_DLL_XFORM_DQ5 */ | ||
343 | 0x0000fc00 /* EMC_DLL_XFORM_DQ6 */ | ||
344 | 0x0000fc00 /* EMC_DLL_XFORM_DQ7 */ | ||
345 | 0x10000280 /* EMC_XM2CMDPADCTRL */ | ||
346 | 0x00000000 /* EMC_XM2CMDPADCTRL4 */ | ||
347 | 0x00111111 /* EMC_XM2CMDPADCTRL5 */ | ||
348 | 0x00000000 /* EMC_XM2DQPADCTRL2 */ | ||
349 | 0x00000000 /* EMC_XM2DQPADCTRL3 */ | ||
350 | 0x77ffc081 /* EMC_XM2CLKPADCTRL */ | ||
351 | 0x00000e0e /* EMC_XM2CLKPADCTRL2 */ | ||
352 | 0x81f1f108 /* EMC_XM2COMPPADCTRL */ | ||
353 | 0x07070004 /* EMC_XM2VTTGENPADCTRL */ | ||
354 | 0x0000003f /* EMC_XM2VTTGENPADCTRL2 */ | ||
355 | 0x016eeeee /* EMC_XM2VTTGENPADCTRL3 */ | ||
356 | 0x51451400 /* EMC_XM2DQSPADCTRL3 */ | ||
357 | 0x00514514 /* EMC_XM2DQSPADCTRL4 */ | ||
358 | 0x00514514 /* EMC_XM2DQSPADCTRL5 */ | ||
359 | 0x51451400 /* EMC_XM2DQSPADCTRL6 */ | ||
360 | 0x0000003f /* EMC_DSR_VTTGEN_DRV */ | ||
361 | 0x00000007 /* EMC_TXDSRVTTGEN */ | ||
362 | 0x00000000 /* EMC_FBIO_SPARE */ | ||
363 | 0x00000042 /* EMC_ZCAL_WAIT_CNT */ | ||
364 | 0x000e000e /* EMC_MRS_WAIT_CNT2 */ | ||
365 | 0x00000000 /* EMC_CTT */ | ||
366 | 0x00000003 /* EMC_CTT_DURATION */ | ||
367 | 0x0000f2f3 /* EMC_CFG_PIPE */ | ||
368 | 0x800001c5 /* EMC_DYN_SELF_REF_CONTROL */ | ||
369 | 0x0000000a /* EMC_QPOP */ | ||
370 | >; | ||
371 | }; | ||
372 | }; | ||
373 | }; | ||
374 | }; | ||
diff --git a/Documentation/devicetree/bindings/mfd/mfd.txt b/Documentation/devicetree/bindings/mfd/mfd.txt
new file mode 100644
index 000000000000..af9d6931a1a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/mfd.txt
@@ -0,0 +1,41 @@ | |||
1 | Multi-Function Devices (MFD) | ||
2 | |||
3 | These devices comprise a nexus for heterogeneous hardware blocks containing | ||
4 | more than one non-unique yet varying hardware functionality. | ||
5 | |||
6 | A typical MFD can be: | ||
7 | |||
8 | - A mixed signal ASIC on an external bus, sometimes a PMIC (Power Management | ||
9 | Integrated Circuit) that is manufactured in a lower technology node (rough | ||
10 | silicon) that handles analog drivers for things like audio amplifiers, LED | ||
11 | drivers, level shifters, PHY (physical interfaces to things like USB or | ||
12 | ethernet), regulators etc. | ||
13 | |||
14 | - A range of memory registers containing "miscellaneous system registers" also | ||
15 | known as a system controller "syscon" or any other memory range containing a | ||
16 | mix of unrelated hardware devices. | ||
17 | |||
18 | Optional properties: | ||
19 | |||
20 | - compatible : "simple-mfd" - this signifies that the operating system should | ||
21 | consider all subnodes of the MFD device as separate devices akin to how | ||
22 | "simple-bus" inidicates when to see subnodes as children for a simple | ||
23 | memory-mapped bus. For more complex devices, when the nexus driver has to | ||
24 | probe registers to figure out what child devices exist etc, this should not | ||
25 | be used. In the latter case the child devices will be determined by the | ||
26 | operating system. | ||
27 | |||
28 | Example: | ||
29 | |||
30 | foo@1000 { | ||
31 | compatible = "syscon", "simple-mfd"; | ||
32 | reg = <0x01000 0x1000>; | ||
33 | |||
34 | led@08.0 { | ||
35 | compatible = "register-bit-led"; | ||
36 | offset = <0x08>; | ||
37 | mask = <0x01>; | ||
38 | label = "myled"; | ||
39 | default-state = "on"; | ||
40 | }; | ||
41 | }; | ||
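
[Illustration only, not part of this patch.] For contrast with the paragraph above: when the parent device needs its own driver to discover or configure its sub-functions at probe time, "simple-mfd" is left out and only "syscon" (or a device-specific compatible) is used. The "acme," compatible below is hypothetical:

	bar@2000 {
		/* No "simple-mfd": the vendor driver enumerates its own children. */
		compatible = "acme,bar-system-controller", "syscon";
		reg = <0x02000 0x1000>;
	};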
diff --git a/Documentation/devicetree/bindings/misc/nvidia,tegra20-apbmisc.txt b/Documentation/devicetree/bindings/misc/nvidia,tegra20-apbmisc.txt
index 47b205cc9cc7..4556359c5876 100644
--- a/Documentation/devicetree/bindings/misc/nvidia,tegra20-apbmisc.txt
+++ b/Documentation/devicetree/bindings/misc/nvidia,tegra20-apbmisc.txt
@@ -10,3 +10,5 @@ Required properties: | |||
10 | The second entry gives the physical address and length of the | 10 | The second entry gives the physical address and length of the |
11 | registers indicating the strapping options. | 11 | registers indicating the strapping options. |
12 | 12 | ||
13 | Optional properties: | ||
14 | - nvidia,long-ram-code: If present, the RAM code is long (4 bit). If not, short (2 bit). | ||
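
[Illustration only, not part of this patch.] A SoC whose strapping uses the 4-bit RAM code could set the new property as sketched below; the unit address, register offsets and Tegra124 compatible strings are assumptions based on typical Tegra device trees, not taken from this series:

	apbmisc@0,70000800 {
		compatible = "nvidia,tegra124-apbmisc", "nvidia,tegra20-apbmisc";
		reg = <0x0 0x70000800 0x0 0x64>,	/* chip revision */
		      <0x0 0x7000e864 0x0 0x04>;	/* strapping options */
		nvidia,long-ram-code;
	};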
diff --git a/Documentation/devicetree/bindings/soc/sunxi/sram.txt b/Documentation/devicetree/bindings/soc/sunxi/sram.txt
new file mode 100644
index 000000000000..067698112f5f
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/sunxi/sram.txt
@@ -0,0 +1,72 @@ | |||
1 | Allwinner SoC SRAM controllers | ||
2 | ----------------------------------------------------- | ||
3 | |||
4 | The SRAM controller found on most Allwinner devices is represented by | ||
5 | a regular node for the SRAM controller itself, with sub-nodes | ||
6 | representing the SRAM handled by the SRAM controller. | ||
7 | |||
8 | Controller Node | ||
9 | --------------- | ||
10 | |||
11 | Required properties: | ||
12 | - compatible : "allwinner,sun4i-a10-sram-controller" | ||
13 | - reg : sram controller register offset + length | ||
14 | |||
15 | SRAM nodes | ||
16 | ---------- | ||
17 | |||
18 | Each SRAM is described using the mmio-sram bindings documented in | ||
19 | Documentation/devicetree/bindings/misc/sram.txt | ||
20 | |||
21 | Each SRAM will have SRAM sections that are going to be handled by the | ||
22 | SRAM controller as subnodes. These sections are represented following | ||
23 | once again the representation described in the mmio-sram binding. | ||
24 | |||
25 | The valid sections compatible are: | ||
26 | - allwinner,sun4i-a10-sram-a3-a4 | ||
27 | - allwinner,sun4i-a10-sram-d | ||
28 | |||
29 | Devices using SRAM sections | ||
30 | --------------------------- | ||
31 | |||
32 | Some devices need to request to the SRAM controller to map an SRAM for | ||
33 | their exclusive use. | ||
34 | |||
35 | The relationship between such a device and an SRAM section is | ||
36 | expressed through the allwinner,sram property, that will take a | ||
37 | phandle and an argument. | ||
38 | |||
39 | The valid values for this argument are: | ||
40 | - 0: CPU | ||
41 | - 1: Device | ||
42 | |||
43 | Example | ||
44 | ------- | ||
45 | sram-controller@01c00000 { | ||
46 | compatible = "allwinner,sun4i-a10-sram-controller"; | ||
47 | reg = <0x01c00000 0x30>; | ||
48 | #address-cells = <1>; | ||
49 | #size-cells = <1>; | ||
50 | ranges; | ||
51 | |||
52 | sram_a: sram@00000000 { | ||
53 | compatible = "mmio-sram"; | ||
54 | reg = <0x00000000 0xc000>; | ||
55 | #address-cells = <1>; | ||
56 | #size-cells = <1>; | ||
57 | ranges = <0 0x00000000 0xc000>; | ||
58 | |||
59 | emac_sram: sram-section@8000 { | ||
60 | compatible = "allwinner,sun4i-a10-sram-a3-a4"; | ||
61 | reg = <0x8000 0x4000>; | ||
62 | status = "disabled"; | ||
63 | }; | ||
64 | }; | ||
65 | }; | ||
66 | |||
67 | emac: ethernet@01c0b000 { | ||
68 | compatible = "allwinner,sun4i-a10-emac"; | ||
69 | ... | ||
70 | |||
71 | allwinner,sram = <&emac_sram 1>; | ||
72 | }; | ||
diff --git a/arch/arm/boot/dts/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm-realview-pb1176.dts
index ff26c7ed8c41..1bc64cda819e 100644
--- a/arch/arm/boot/dts/arm-realview-pb1176.dts
+++ b/arch/arm/boot/dts/arm-realview-pb1176.dts
@@ -114,7 +114,7 @@ | |||
114 | ranges; | 114 | ranges; |
115 | 115 | ||
116 | syscon: syscon@10000000 { | 116 | syscon: syscon@10000000 { |
117 | compatible = "arm,realview-pb1176-syscon", "syscon"; | 117 | compatible = "arm,realview-pb1176-syscon", "syscon", "simple-mfd"; |
118 | reg = <0x10000000 0x1000>; | 118 | reg = <0x10000000 0x1000>; |
119 | 119 | ||
120 | led@08.0 { | 120 | led@08.0 { |
diff --git a/arch/arm/boot/dts/berlin2.dtsi b/arch/arm/boot/dts/berlin2.dtsi
index da9adf8f69da..ef811de09908 100644
--- a/arch/arm/boot/dts/berlin2.dtsi
+++ b/arch/arm/boot/dts/berlin2.dtsi
@@ -84,7 +84,7 @@ | |||
84 | sdhci0: sdhci@ab0000 { | 84 | sdhci0: sdhci@ab0000 { |
85 | compatible = "mrvl,pxav3-mmc"; | 85 | compatible = "mrvl,pxav3-mmc"; |
86 | reg = <0xab0000 0x200>; | 86 | reg = <0xab0000 0x200>; |
87 | clocks = <&chip CLKID_SDIO0XIN>, <&chip CLKID_SDIO0>; | 87 | clocks = <&chip_clk CLKID_SDIO0XIN>, <&chip_clk CLKID_SDIO0>; |
88 | clock-names = "io", "core"; | 88 | clock-names = "io", "core"; |
89 | interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; | 89 | interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; |
90 | status = "disabled"; | 90 | status = "disabled"; |
@@ -93,7 +93,7 @@ | |||
93 | sdhci1: sdhci@ab0800 { | 93 | sdhci1: sdhci@ab0800 { |
94 | compatible = "mrvl,pxav3-mmc"; | 94 | compatible = "mrvl,pxav3-mmc"; |
95 | reg = <0xab0800 0x200>; | 95 | reg = <0xab0800 0x200>; |
96 | clocks = <&chip CLKID_SDIO1XIN>, <&chip CLKID_SDIO1>; | 96 | clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO1>; |
97 | clock-names = "io", "core"; | 97 | clock-names = "io", "core"; |
98 | interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>; | 98 | interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>; |
99 | status = "disabled"; | 99 | status = "disabled"; |
@@ -103,7 +103,7 @@ | |||
103 | compatible = "mrvl,pxav3-mmc"; | 103 | compatible = "mrvl,pxav3-mmc"; |
104 | reg = <0xab1000 0x200>; | 104 | reg = <0xab1000 0x200>; |
105 | interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>; | 105 | interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>; |
106 | clocks = <&chip CLKID_NFC_ECC>, <&chip CLKID_NFC>; | 106 | clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_NFC>; |
107 | clock-names = "io", "core"; | 107 | clock-names = "io", "core"; |
108 | pinctrl-0 = <&emmc_pmux>; | 108 | pinctrl-0 = <&emmc_pmux>; |
109 | pinctrl-names = "default"; | 109 | pinctrl-names = "default"; |
@@ -133,13 +133,13 @@ | |||
133 | compatible = "arm,cortex-a9-twd-timer"; | 133 | compatible = "arm,cortex-a9-twd-timer"; |
134 | reg = <0xad0600 0x20>; | 134 | reg = <0xad0600 0x20>; |
135 | interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>; | 135 | interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>; |
136 | clocks = <&chip CLKID_TWD>; | 136 | clocks = <&chip_clk CLKID_TWD>; |
137 | }; | 137 | }; |
138 | 138 | ||
139 | eth1: ethernet@b90000 { | 139 | eth1: ethernet@b90000 { |
140 | compatible = "marvell,pxa168-eth"; | 140 | compatible = "marvell,pxa168-eth"; |
141 | reg = <0xb90000 0x10000>; | 141 | reg = <0xb90000 0x10000>; |
142 | clocks = <&chip CLKID_GETH1>; | 142 | clocks = <&chip_clk CLKID_GETH1>; |
143 | interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>; | 143 | interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>; |
144 | /* set by bootloader */ | 144 | /* set by bootloader */ |
145 | local-mac-address = [00 00 00 00 00 00]; | 145 | local-mac-address = [00 00 00 00 00 00]; |
@@ -162,7 +162,7 @@ | |||
162 | eth0: ethernet@e50000 { | 162 | eth0: ethernet@e50000 { |
163 | compatible = "marvell,pxa168-eth"; | 163 | compatible = "marvell,pxa168-eth"; |
164 | reg = <0xe50000 0x10000>; | 164 | reg = <0xe50000 0x10000>; |
165 | clocks = <&chip CLKID_GETH0>; | 165 | clocks = <&chip_clk CLKID_GETH0>; |
166 | interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>; | 166 | interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>; |
167 | /* set by bootloader */ | 167 | /* set by bootloader */ |
168 | local-mac-address = [00 00 00 00 00 00]; | 168 | local-mac-address = [00 00 00 00 00 00]; |
@@ -261,7 +261,7 @@ | |||
261 | compatible = "snps,dw-apb-timer"; | 261 | compatible = "snps,dw-apb-timer"; |
262 | reg = <0x2c00 0x14>; | 262 | reg = <0x2c00 0x14>; |
263 | interrupts = <8>; | 263 | interrupts = <8>; |
264 | clocks = <&chip CLKID_CFG>; | 264 | clocks = <&chip_clk CLKID_CFG>; |
265 | clock-names = "timer"; | 265 | clock-names = "timer"; |
266 | status = "okay"; | 266 | status = "okay"; |
267 | }; | 267 | }; |
@@ -270,7 +270,7 @@ | |||
270 | compatible = "snps,dw-apb-timer"; | 270 | compatible = "snps,dw-apb-timer"; |
271 | reg = <0x2c14 0x14>; | 271 | reg = <0x2c14 0x14>; |
272 | interrupts = <9>; | 272 | interrupts = <9>; |
273 | clocks = <&chip CLKID_CFG>; | 273 | clocks = <&chip_clk CLKID_CFG>; |
274 | clock-names = "timer"; | 274 | clock-names = "timer"; |
275 | status = "okay"; | 275 | status = "okay"; |
276 | }; | 276 | }; |
@@ -279,7 +279,7 @@ | |||
279 | compatible = "snps,dw-apb-timer"; | 279 | compatible = "snps,dw-apb-timer"; |
280 | reg = <0x2c28 0x14>; | 280 | reg = <0x2c28 0x14>; |
281 | interrupts = <10>; | 281 | interrupts = <10>; |
282 | clocks = <&chip CLKID_CFG>; | 282 | clocks = <&chip_clk CLKID_CFG>; |
283 | clock-names = "timer"; | 283 | clock-names = "timer"; |
284 | status = "disabled"; | 284 | status = "disabled"; |
285 | }; | 285 | }; |
@@ -288,7 +288,7 @@ | |||
288 | compatible = "snps,dw-apb-timer"; | 288 | compatible = "snps,dw-apb-timer"; |
289 | reg = <0x2c3c 0x14>; | 289 | reg = <0x2c3c 0x14>; |
290 | interrupts = <11>; | 290 | interrupts = <11>; |
291 | clocks = <&chip CLKID_CFG>; | 291 | clocks = <&chip_clk CLKID_CFG>; |
292 | clock-names = "timer"; | 292 | clock-names = "timer"; |
293 | status = "disabled"; | 293 | status = "disabled"; |
294 | }; | 294 | }; |
@@ -297,7 +297,7 @@ | |||
297 | compatible = "snps,dw-apb-timer"; | 297 | compatible = "snps,dw-apb-timer"; |
298 | reg = <0x2c50 0x14>; | 298 | reg = <0x2c50 0x14>; |
299 | interrupts = <12>; | 299 | interrupts = <12>; |
300 | clocks = <&chip CLKID_CFG>; | 300 | clocks = <&chip_clk CLKID_CFG>; |
301 | clock-names = "timer"; | 301 | clock-names = "timer"; |
302 | status = "disabled"; | 302 | status = "disabled"; |
303 | }; | 303 | }; |
@@ -306,7 +306,7 @@ | |||
306 | compatible = "snps,dw-apb-timer"; | 306 | compatible = "snps,dw-apb-timer"; |
307 | reg = <0x2c64 0x14>; | 307 | reg = <0x2c64 0x14>; |
308 | interrupts = <13>; | 308 | interrupts = <13>; |
309 | clocks = <&chip CLKID_CFG>; | 309 | clocks = <&chip_clk CLKID_CFG>; |
310 | clock-names = "timer"; | 310 | clock-names = "timer"; |
311 | status = "disabled"; | 311 | status = "disabled"; |
312 | }; | 312 | }; |
@@ -315,7 +315,7 @@ | |||
315 | compatible = "snps,dw-apb-timer"; | 315 | compatible = "snps,dw-apb-timer"; |
316 | reg = <0x2c78 0x14>; | 316 | reg = <0x2c78 0x14>; |
317 | interrupts = <14>; | 317 | interrupts = <14>; |
318 | clocks = <&chip CLKID_CFG>; | 318 | clocks = <&chip_clk CLKID_CFG>; |
319 | clock-names = "timer"; | 319 | clock-names = "timer"; |
320 | status = "disabled"; | 320 | status = "disabled"; |
321 | }; | 321 | }; |
@@ -324,7 +324,7 @@ | |||
324 | compatible = "snps,dw-apb-timer"; | 324 | compatible = "snps,dw-apb-timer"; |
325 | reg = <0x2c8c 0x14>; | 325 | reg = <0x2c8c 0x14>; |
326 | interrupts = <15>; | 326 | interrupts = <15>; |
327 | clocks = <&chip CLKID_CFG>; | 327 | clocks = <&chip_clk CLKID_CFG>; |
328 | clock-names = "timer"; | 328 | clock-names = "timer"; |
329 | status = "disabled"; | 329 | status = "disabled"; |
330 | }; | 330 | }; |
@@ -343,7 +343,7 @@ | |||
343 | compatible = "marvell,berlin2-ahci", "generic-ahci"; | 343 | compatible = "marvell,berlin2-ahci", "generic-ahci"; |
344 | reg = <0xe90000 0x1000>; | 344 | reg = <0xe90000 0x1000>; |
345 | interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>; | 345 | interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>; |
346 | clocks = <&chip CLKID_SATA>; | 346 | clocks = <&chip_clk CLKID_SATA>; |
347 | #address-cells = <1>; | 347 | #address-cells = <1>; |
348 | #size-cells = <0>; | 348 | #size-cells = <0>; |
349 | 349 | ||
@@ -363,7 +363,7 @@ | |||
363 | sata_phy: phy@e900a0 { | 363 | sata_phy: phy@e900a0 { |
364 | compatible = "marvell,berlin2-sata-phy"; | 364 | compatible = "marvell,berlin2-sata-phy"; |
365 | reg = <0xe900a0 0x200>; | 365 | reg = <0xe900a0 0x200>; |
366 | clocks = <&chip CLKID_SATA>; | 366 | clocks = <&chip_clk CLKID_SATA>; |
367 | #address-cells = <1>; | 367 | #address-cells = <1>; |
368 | #size-cells = <0>; | 368 | #size-cells = <0>; |
369 | #phy-cells = <1>; | 369 | #phy-cells = <1>; |
@@ -379,16 +379,28 @@ | |||
379 | }; | 379 | }; |
380 | 380 | ||
381 | chip: chip-control@ea0000 { | 381 | chip: chip-control@ea0000 { |
382 | compatible = "marvell,berlin2-chip-ctrl"; | 382 | compatible = "simple-mfd", "syscon"; |
383 | #clock-cells = <1>; | ||
384 | #reset-cells = <2>; | ||
385 | reg = <0xea0000 0x400>; | 383 | reg = <0xea0000 0x400>; |
386 | clocks = <&refclk>; | ||
387 | clock-names = "refclk"; | ||
388 | 384 | ||
389 | emmc_pmux: emmc-pmux { | 385 | chip_clk: clock { |
390 | groups = "G26"; | 386 | compatible = "marvell,berlin2-clk"; |
391 | function = "emmc"; | 387 | #clock-cells = <1>; |
388 | clocks = <&refclk>; | ||
389 | clock-names = "refclk"; | ||
390 | }; | ||
391 | |||
392 | soc_pinctrl: pin-controller { | ||
393 | compatible = "marvell,berlin2-soc-pinctrl"; | ||
394 | |||
395 | emmc_pmux: emmc-pmux { | ||
396 | groups = "G26"; | ||
397 | function = "emmc"; | ||
398 | }; | ||
399 | }; | ||
400 | |||
401 | chip_rst: reset { | ||
402 | compatible = "marvell,berlin2-reset"; | ||
403 | #reset-cells = <2>; | ||
392 | }; | 404 | }; |
393 | }; | 405 | }; |
394 | 406 | ||
@@ -470,22 +482,24 @@ | |||
470 | }; | 482 | }; |
471 | 483 | ||
472 | sysctrl: system-controller@d000 { | 484 | sysctrl: system-controller@d000 { |
473 | compatible = "marvell,berlin2-system-ctrl"; | 485 | compatible = "simple-mfd", "syscon"; |
474 | reg = <0xd000 0x100>; | 486 | reg = <0xd000 0x100>; |
475 | 487 | ||
476 | uart0_pmux: uart0-pmux { | 488 | sys_pinctrl: pin-controller { |
477 | groups = "GSM4"; | 489 | compatible = "marvell,berlin2-system-pinctrl"; |
478 | function = "uart0"; | 490 | uart0_pmux: uart0-pmux { |
479 | }; | 491 | groups = "GSM4"; |
480 | 492 | function = "uart0"; | |
481 | uart1_pmux: uart1-pmux { | 493 | }; |
482 | groups = "GSM5"; | 494 | |
483 | function = "uart1"; | 495 | uart1_pmux: uart1-pmux { |
484 | }; | 496 | groups = "GSM5"; |
485 | 497 | function = "uart1"; | |
486 | uart2_pmux: uart2-pmux { | 498 | }; |
487 | groups = "GSM3"; | 499 | uart2_pmux: uart2-pmux { |
488 | function = "uart2"; | 500 | groups = "GSM3"; |
501 | function = "uart2"; | ||
502 | }; | ||
489 | }; | 503 | }; |
490 | }; | 504 | }; |
491 | 505 | ||
diff --git a/arch/arm/boot/dts/berlin2cd.dtsi b/arch/arm/boot/dts/berlin2cd.dtsi
index cb2a97cb9579..900213d78a32 100644
--- a/arch/arm/boot/dts/berlin2cd.dtsi
+++ b/arch/arm/boot/dts/berlin2cd.dtsi
@@ -81,7 +81,7 @@ | |||
81 | sdhci0: sdhci@ab0000 { | 81 | sdhci0: sdhci@ab0000 { |
82 | compatible = "mrvl,pxav3-mmc"; | 82 | compatible = "mrvl,pxav3-mmc"; |
83 | reg = <0xab0000 0x200>; | 83 | reg = <0xab0000 0x200>; |
84 | clocks = <&chip CLKID_SDIO0XIN>, <&chip CLKID_SDIO0>; | 84 | clocks = <&chip_clk CLKID_SDIO0XIN>, <&chip_clk CLKID_SDIO0>; |
85 | clock-names = "io", "core"; | 85 | clock-names = "io", "core"; |
86 | interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; | 86 | interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; |
87 | status = "disabled"; | 87 | status = "disabled"; |
@@ -105,14 +105,14 @@ | |||
105 | compatible = "arm,cortex-a9-twd-timer"; | 105 | compatible = "arm,cortex-a9-twd-timer"; |
106 | reg = <0xad0600 0x20>; | 106 | reg = <0xad0600 0x20>; |
107 | interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_HIGH)>; | 107 | interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_HIGH)>; |
108 | clocks = <&chip CLKID_TWD>; | 108 | clocks = <&chip_clk CLKID_TWD>; |
109 | }; | 109 | }; |
110 | 110 | ||
111 | usb_phy0: usb-phy@b74000 { | 111 | usb_phy0: usb-phy@b74000 { |
112 | compatible = "marvell,berlin2cd-usb-phy"; | 112 | compatible = "marvell,berlin2cd-usb-phy"; |
113 | reg = <0xb74000 0x128>; | 113 | reg = <0xb74000 0x128>; |
114 | #phy-cells = <0>; | 114 | #phy-cells = <0>; |
115 | resets = <&chip 0x178 23>; | 115 | resets = <&chip_rst 0x178 23>; |
116 | status = "disabled"; | 116 | status = "disabled"; |
117 | }; | 117 | }; |
118 | 118 | ||
@@ -120,14 +120,14 @@ | |||
120 | compatible = "marvell,berlin2cd-usb-phy"; | 120 | compatible = "marvell,berlin2cd-usb-phy"; |
121 | reg = <0xb78000 0x128>; | 121 | reg = <0xb78000 0x128>; |
122 | #phy-cells = <0>; | 122 | #phy-cells = <0>; |
123 | resets = <&chip 0x178 24>; | 123 | resets = <&chip_rst 0x178 24>; |
124 | status = "disabled"; | 124 | status = "disabled"; |
125 | }; | 125 | }; |
126 | 126 | ||
127 | eth1: ethernet@b90000 { | 127 | eth1: ethernet@b90000 { |
128 | compatible = "marvell,pxa168-eth"; | 128 | compatible = "marvell,pxa168-eth"; |
129 | reg = <0xb90000 0x10000>; | 129 | reg = <0xb90000 0x10000>; |
130 | clocks = <&chip CLKID_GETH1>; | 130 | clocks = <&chip_clk CLKID_GETH1>; |
131 | interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>; | 131 | interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>; |
132 | /* set by bootloader */ | 132 | /* set by bootloader */ |
133 | local-mac-address = [00 00 00 00 00 00]; | 133 | local-mac-address = [00 00 00 00 00 00]; |
@@ -145,7 +145,7 @@ | |||
145 | eth0: ethernet@e50000 { | 145 | eth0: ethernet@e50000 { |
146 | compatible = "marvell,pxa168-eth"; | 146 | compatible = "marvell,pxa168-eth"; |
147 | reg = <0xe50000 0x10000>; | 147 | reg = <0xe50000 0x10000>; |
148 | clocks = <&chip CLKID_GETH0>; | 148 | clocks = <&chip_clk CLKID_GETH0>; |
149 | interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>; | 149 | interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>; |
150 | /* set by bootloader */ | 150 | /* set by bootloader */ |
151 | local-mac-address = [00 00 00 00 00 00]; | 151 | local-mac-address = [00 00 00 00 00 00]; |
@@ -244,7 +244,7 @@ | |||
244 | compatible = "snps,dw-apb-timer"; | 244 | compatible = "snps,dw-apb-timer"; |
245 | reg = <0x2c00 0x14>; | 245 | reg = <0x2c00 0x14>; |
246 | interrupts = <8>; | 246 | interrupts = <8>; |
247 | clocks = <&chip CLKID_CFG>; | 247 | clocks = <&chip_clk CLKID_CFG>; |
248 | clock-names = "timer"; | 248 | clock-names = "timer"; |
249 | status = "okay"; | 249 | status = "okay"; |
250 | }; | 250 | }; |
@@ -253,7 +253,7 @@ | |||
253 | compatible = "snps,dw-apb-timer"; | 253 | compatible = "snps,dw-apb-timer"; |
254 | reg = <0x2c14 0x14>; | 254 | reg = <0x2c14 0x14>; |
255 | interrupts = <9>; | 255 | interrupts = <9>; |
256 | clocks = <&chip CLKID_CFG>; | 256 | clocks = <&chip_clk CLKID_CFG>; |
257 | clock-names = "timer"; | 257 | clock-names = "timer"; |
258 | status = "okay"; | 258 | status = "okay"; |
259 | }; | 259 | }; |
@@ -262,7 +262,7 @@ | |||
262 | compatible = "snps,dw-apb-timer"; | 262 | compatible = "snps,dw-apb-timer"; |
263 | reg = <0x2c28 0x14>; | 263 | reg = <0x2c28 0x14>; |
264 | interrupts = <10>; | 264 | interrupts = <10>; |
265 | clocks = <&chip CLKID_CFG>; | 265 | clocks = <&chip_clk CLKID_CFG>; |
266 | clock-names = "timer"; | 266 | clock-names = "timer"; |
267 | status = "disabled"; | 267 | status = "disabled"; |
268 | }; | 268 | }; |
@@ -271,7 +271,7 @@ | |||
271 | compatible = "snps,dw-apb-timer"; | 271 | compatible = "snps,dw-apb-timer"; |
272 | reg = <0x2c3c 0x14>; | 272 | reg = <0x2c3c 0x14>; |
273 | interrupts = <11>; | 273 | interrupts = <11>; |
274 | clocks = <&chip CLKID_CFG>; | 274 | clocks = <&chip_clk CLKID_CFG>; |
275 | clock-names = "timer"; | 275 | clock-names = "timer"; |
276 | status = "disabled"; | 276 | status = "disabled"; |
277 | }; | 277 | }; |
@@ -280,7 +280,7 @@ | |||
280 | compatible = "snps,dw-apb-timer"; | 280 | compatible = "snps,dw-apb-timer"; |
281 | reg = <0x2c50 0x14>; | 281 | reg = <0x2c50 0x14>; |
282 | interrupts = <12>; | 282 | interrupts = <12>; |
283 | clocks = <&chip CLKID_CFG>; | 283 | clocks = <&chip_clk CLKID_CFG>; |
284 | clock-names = "timer"; | 284 | clock-names = "timer"; |
285 | status = "disabled"; | 285 | status = "disabled"; |
286 | }; | 286 | }; |
@@ -289,7 +289,7 @@ | |||
289 | compatible = "snps,dw-apb-timer"; | 289 | compatible = "snps,dw-apb-timer"; |
290 | reg = <0x2c64 0x14>; | 290 | reg = <0x2c64 0x14>; |
291 | interrupts = <13>; | 291 | interrupts = <13>; |
292 | clocks = <&chip CLKID_CFG>; | 292 | clocks = <&chip_clk CLKID_CFG>; |
293 | clock-names = "timer"; | 293 | clock-names = "timer"; |
294 | status = "disabled"; | 294 | status = "disabled"; |
295 | }; | 295 | }; |
@@ -298,7 +298,7 @@ | |||
298 | compatible = "snps,dw-apb-timer"; | 298 | compatible = "snps,dw-apb-timer"; |
299 | reg = <0x2c78 0x14>; | 299 | reg = <0x2c78 0x14>; |
300 | interrupts = <14>; | 300 | interrupts = <14>; |
301 | clocks = <&chip CLKID_CFG>; | 301 | clocks = <&chip_clk CLKID_CFG>; |
302 | clock-names = "timer"; | 302 | clock-names = "timer"; |
303 | status = "disabled"; | 303 | status = "disabled"; |
304 | }; | 304 | }; |
@@ -307,7 +307,7 @@ | |||
307 | compatible = "snps,dw-apb-timer"; | 307 | compatible = "snps,dw-apb-timer"; |
308 | reg = <0x2c8c 0x14>; | 308 | reg = <0x2c8c 0x14>; |
309 | interrupts = <15>; | 309 | interrupts = <15>; |
310 | clocks = <&chip CLKID_CFG>; | 310 | clocks = <&chip_clk CLKID_CFG>; |
311 | clock-names = "timer"; | 311 | clock-names = "timer"; |
312 | status = "disabled"; | 312 | status = "disabled"; |
313 | }; | 313 | }; |
@@ -323,16 +323,28 @@ | |||
323 | }; | 323 | }; |
324 | 324 | ||
325 | chip: chip-control@ea0000 { | 325 | chip: chip-control@ea0000 { |
326 | compatible = "marvell,berlin2cd-chip-ctrl"; | 326 | compatible = "simple-mfd", "syscon"; |
327 | #clock-cells = <1>; | ||
328 | #reset-cells = <2>; | ||
329 | reg = <0xea0000 0x400>; | 327 | reg = <0xea0000 0x400>; |
330 | clocks = <&refclk>; | ||
331 | clock-names = "refclk"; | ||
332 | 328 | ||
333 | uart0_pmux: uart0-pmux { | 329 | chip_clk: clock { |
334 | groups = "G6"; | 330 | compatible = "marvell,berlin2-clk"; |
335 | function = "uart0"; | 331 | #clock-cells = <1>; |
332 | clocks = <&refclk>; | ||
333 | clock-names = "refclk"; | ||
334 | }; | ||
335 | |||
336 | soc_pinctrl: pin-controller { | ||
337 | compatible = "marvell,berlin2cd-soc-pinctrl"; | ||
338 | |||
339 | uart0_pmux: uart0-pmux { | ||
340 | groups = "G6"; | ||
341 | function = "uart0"; | ||
342 | }; | ||
343 | }; | ||
344 | |||
345 | chip_rst: reset { | ||
346 | compatible = "marvell,berlin2-reset"; | ||
347 | #reset-cells = <2>; | ||
336 | }; | 348 | }; |
337 | }; | 349 | }; |
338 | 350 | ||
@@ -340,7 +352,7 @@ | |||
340 | compatible = "chipidea,usb2"; | 352 | compatible = "chipidea,usb2"; |
341 | reg = <0xed0000 0x200>; | 353 | reg = <0xed0000 0x200>; |
342 | interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>; | 354 | interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>; |
343 | clocks = <&chip CLKID_USB0>; | 355 | clocks = <&chip_clk CLKID_USB0>; |
344 | phys = <&usb_phy0>; | 356 | phys = <&usb_phy0>; |
345 | phy-names = "usb-phy"; | 357 | phy-names = "usb-phy"; |
346 | status = "disabled"; | 358 | status = "disabled"; |
@@ -350,7 +362,7 @@ | |||
350 | compatible = "chipidea,usb2"; | 362 | compatible = "chipidea,usb2"; |
351 | reg = <0xee0000 0x200>; | 363 | reg = <0xee0000 0x200>; |
352 | interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; | 364 | interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; |
353 | clocks = <&chip CLKID_USB1>; | 365 | clocks = <&chip_clk CLKID_USB1>; |
354 | phys = <&usb_phy1>; | 366 | phys = <&usb_phy1>; |
355 | phy-names = "usb-phy"; | 367 | phy-names = "usb-phy"; |
356 | status = "disabled"; | 368 | status = "disabled"; |
@@ -417,8 +429,12 @@ | |||
417 | }; | 429 | }; |
418 | 430 | ||
419 | sysctrl: system-controller@d000 { | 431 | sysctrl: system-controller@d000 { |
420 | compatible = "marvell,berlin2cd-system-ctrl"; | 432 | compatible = "simple-mfd", "syscon"; |
421 | reg = <0xd000 0x100>; | 433 | reg = <0xd000 0x100>; |
434 | |||
435 | sys_pinctrl: pin-controller { | ||
436 | compatible = "marvell,berlin2cd-system-pinctrl"; | ||
437 | }; | ||
422 | }; | 438 | }; |
423 | 439 | ||
424 | sic: interrupt-controller@e000 { | 440 | sic: interrupt-controller@e000 { |
diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi
index 703ce384e0d7..63a48490e2f9 100644
--- a/arch/arm/boot/dts/berlin2q.dtsi
+++ b/arch/arm/boot/dts/berlin2q.dtsi
@@ -102,7 +102,7 @@ | |||
102 | sdhci0: sdhci@ab0000 { | 102 | sdhci0: sdhci@ab0000 { |
103 | compatible = "mrvl,pxav3-mmc"; | 103 | compatible = "mrvl,pxav3-mmc"; |
104 | reg = <0xab0000 0x200>; | 104 | reg = <0xab0000 0x200>; |
105 | clocks = <&chip CLKID_SDIO1XIN>; | 105 | clocks = <&chip_clk CLKID_SDIO1XIN>; |
106 | interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; | 106 | interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; |
107 | status = "disabled"; | 107 | status = "disabled"; |
108 | }; | 108 | }; |
@@ -110,7 +110,7 @@ | |||
110 | sdhci1: sdhci@ab0800 { | 110 | sdhci1: sdhci@ab0800 { |
111 | compatible = "mrvl,pxav3-mmc"; | 111 | compatible = "mrvl,pxav3-mmc"; |
112 | reg = <0xab0800 0x200>; | 112 | reg = <0xab0800 0x200>; |
113 | clocks = <&chip CLKID_SDIO1XIN>; | 113 | clocks = <&chip_clk CLKID_SDIO1XIN>; |
114 | interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>; | 114 | interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>; |
115 | status = "disabled"; | 115 | status = "disabled"; |
116 | }; | 116 | }; |
@@ -119,7 +119,7 @@ | |||
119 | compatible = "mrvl,pxav3-mmc"; | 119 | compatible = "mrvl,pxav3-mmc"; |
120 | reg = <0xab1000 0x200>; | 120 | reg = <0xab1000 0x200>; |
121 | interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>; | 121 | interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>; |
122 | clocks = <&chip CLKID_NFC_ECC>, <&chip CLKID_NFC>; | 122 | clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_NFC>; |
123 | clock-names = "io", "core"; | 123 | clock-names = "io", "core"; |
124 | status = "disabled"; | 124 | status = "disabled"; |
125 | }; | 125 | }; |
@@ -140,7 +140,7 @@ | |||
140 | local-timer@ad0600 { | 140 | local-timer@ad0600 { |
141 | compatible = "arm,cortex-a9-twd-timer"; | 141 | compatible = "arm,cortex-a9-twd-timer"; |
142 | reg = <0xad0600 0x20>; | 142 | reg = <0xad0600 0x20>; |
143 | clocks = <&chip CLKID_TWD>; | 143 | clocks = <&chip_clk CLKID_TWD>; |
144 | interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>; | 144 | interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>; |
145 | }; | 145 | }; |
146 | 146 | ||
@@ -155,7 +155,7 @@ | |||
155 | compatible = "marvell,berlin2-usb-phy"; | 155 | compatible = "marvell,berlin2-usb-phy"; |
156 | reg = <0xa2f400 0x128>; | 156 | reg = <0xa2f400 0x128>; |
157 | #phy-cells = <0>; | 157 | #phy-cells = <0>; |
158 | resets = <&chip 0x104 14>; | 158 | resets = <&chip_rst 0x104 14>; |
159 | status = "disabled"; | 159 | status = "disabled"; |
160 | }; | 160 | }; |
161 | 161 | ||
@@ -163,7 +163,7 @@ | |||
163 | compatible = "chipidea,usb2"; | 163 | compatible = "chipidea,usb2"; |
164 | reg = <0xa30000 0x10000>; | 164 | reg = <0xa30000 0x10000>; |
165 | interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>; | 165 | interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>; |
166 | clocks = <&chip CLKID_USB2>; | 166 | clocks = <&chip_clk CLKID_USB2>; |
167 | phys = <&usb_phy2>; | 167 | phys = <&usb_phy2>; |
168 | phy-names = "usb-phy"; | 168 | phy-names = "usb-phy"; |
169 | status = "disabled"; | 169 | status = "disabled"; |
@@ -173,7 +173,7 @@ | |||
173 | compatible = "marvell,berlin2-usb-phy"; | 173 | compatible = "marvell,berlin2-usb-phy"; |
174 | reg = <0xb74000 0x128>; | 174 | reg = <0xb74000 0x128>; |
175 | #phy-cells = <0>; | 175 | #phy-cells = <0>; |
176 | resets = <&chip 0x104 12>; | 176 | resets = <&chip_rst 0x104 12>; |
177 | status = "disabled"; | 177 | status = "disabled"; |
178 | }; | 178 | }; |
179 | 179 | ||
@@ -181,14 +181,14 @@ | |||
181 | compatible = "marvell,berlin2-usb-phy"; | 181 | compatible = "marvell,berlin2-usb-phy"; |
182 | reg = <0xb78000 0x128>; | 182 | reg = <0xb78000 0x128>; |
183 | #phy-cells = <0>; | 183 | #phy-cells = <0>; |
184 | resets = <&chip 0x104 13>; | 184 | resets = <&chip_rst 0x104 13>; |
185 | status = "disabled"; | 185 | status = "disabled"; |
186 | }; | 186 | }; |
187 | 187 | ||
188 | eth0: ethernet@b90000 { | 188 | eth0: ethernet@b90000 { |
189 | compatible = "marvell,pxa168-eth"; | 189 | compatible = "marvell,pxa168-eth"; |
190 | reg = <0xb90000 0x10000>; | 190 | reg = <0xb90000 0x10000>; |
191 | clocks = <&chip CLKID_GETH0>; | 191 | clocks = <&chip_clk CLKID_GETH0>; |
192 | interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>; | 192 | interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>; |
193 | /* set by bootloader */ | 193 | /* set by bootloader */ |
194 | local-mac-address = [00 00 00 00 00 00]; | 194 | local-mac-address = [00 00 00 00 00 00]; |
@@ -295,7 +295,7 @@ | |||
295 | reg = <0x1400 0x100>; | 295 | reg = <0x1400 0x100>; |
296 | interrupt-parent = <&aic>; | 296 | interrupt-parent = <&aic>; |
297 | interrupts = <4>; | 297 | interrupts = <4>; |
298 | clocks = <&chip CLKID_CFG>; | 298 | clocks = <&chip_clk CLKID_CFG>; |
299 | pinctrl-0 = <&twsi0_pmux>; | 299 | pinctrl-0 = <&twsi0_pmux>; |
300 | pinctrl-names = "default"; | 300 | pinctrl-names = "default"; |
301 | status = "disabled"; | 301 | status = "disabled"; |
@@ -308,7 +308,7 @@ | |||
308 | reg = <0x1800 0x100>; | 308 | reg = <0x1800 0x100>; |
309 | interrupt-parent = <&aic>; | 309 | interrupt-parent = <&aic>; |
310 | interrupts = <5>; | 310 | interrupts = <5>; |
311 | clocks = <&chip CLKID_CFG>; | 311 | clocks = <&chip_clk CLKID_CFG>; |
312 | pinctrl-0 = <&twsi1_pmux>; | 312 | pinctrl-0 = <&twsi1_pmux>; |
313 | pinctrl-names = "default"; | 313 | pinctrl-names = "default"; |
314 | status = "disabled"; | 314 | status = "disabled"; |
@@ -317,7 +317,7 @@ | |||
317 | timer0: timer@2c00 { | 317 | timer0: timer@2c00 { |
318 | compatible = "snps,dw-apb-timer"; | 318 | compatible = "snps,dw-apb-timer"; |
319 | reg = <0x2c00 0x14>; | 319 | reg = <0x2c00 0x14>; |
320 | clocks = <&chip CLKID_CFG>; | 320 | clocks = <&chip_clk CLKID_CFG>; |
321 | clock-names = "timer"; | 321 | clock-names = "timer"; |
322 | interrupts = <8>; | 322 | interrupts = <8>; |
323 | }; | 323 | }; |
@@ -325,14 +325,14 @@ | |||
325 | timer1: timer@2c14 { | 325 | timer1: timer@2c14 { |
326 | compatible = "snps,dw-apb-timer"; | 326 | compatible = "snps,dw-apb-timer"; |
327 | reg = <0x2c14 0x14>; | 327 | reg = <0x2c14 0x14>; |
328 | clocks = <&chip CLKID_CFG>; | 328 | clocks = <&chip_clk CLKID_CFG>; |
329 | clock-names = "timer"; | 329 | clock-names = "timer"; |
330 | }; | 330 | }; |
331 | 331 | ||
332 | timer2: timer@2c28 { | 332 | timer2: timer@2c28 { |
333 | compatible = "snps,dw-apb-timer"; | 333 | compatible = "snps,dw-apb-timer"; |
334 | reg = <0x2c28 0x14>; | 334 | reg = <0x2c28 0x14>; |
335 | clocks = <&chip CLKID_CFG>; | 335 | clocks = <&chip_clk CLKID_CFG>; |
336 | clock-names = "timer"; | 336 | clock-names = "timer"; |
337 | status = "disabled"; | 337 | status = "disabled"; |
338 | }; | 338 | }; |
@@ -340,7 +340,7 @@ | |||
340 | timer3: timer@2c3c { | 340 | timer3: timer@2c3c { |
341 | compatible = "snps,dw-apb-timer"; | 341 | compatible = "snps,dw-apb-timer"; |
342 | reg = <0x2c3c 0x14>; | 342 | reg = <0x2c3c 0x14>; |
343 | clocks = <&chip CLKID_CFG>; | 343 | clocks = <&chip_clk CLKID_CFG>; |
344 | clock-names = "timer"; | 344 | clock-names = "timer"; |
345 | status = "disabled"; | 345 | status = "disabled"; |
346 | }; | 346 | }; |
@@ -348,7 +348,7 @@ | |||
348 | timer4: timer@2c50 { | 348 | timer4: timer@2c50 { |
349 | compatible = "snps,dw-apb-timer"; | 349 | compatible = "snps,dw-apb-timer"; |
350 | reg = <0x2c50 0x14>; | 350 | reg = <0x2c50 0x14>; |
351 | clocks = <&chip CLKID_CFG>; | 351 | clocks = <&chip_clk CLKID_CFG>; |
352 | clock-names = "timer"; | 352 | clock-names = "timer"; |
353 | status = "disabled"; | 353 | status = "disabled"; |
354 | }; | 354 | }; |
@@ -356,7 +356,7 @@ | |||
356 | timer5: timer@2c64 { | 356 | timer5: timer@2c64 { |
357 | compatible = "snps,dw-apb-timer"; | 357 | compatible = "snps,dw-apb-timer"; |
358 | reg = <0x2c64 0x14>; | 358 | reg = <0x2c64 0x14>; |
359 | clocks = <&chip CLKID_CFG>; | 359 | clocks = <&chip_clk CLKID_CFG>; |
360 | clock-names = "timer"; | 360 | clock-names = "timer"; |
361 | status = "disabled"; | 361 | status = "disabled"; |
362 | }; | 362 | }; |
@@ -364,7 +364,7 @@ | |||
364 | timer6: timer@2c78 { | 364 | timer6: timer@2c78 { |
365 | compatible = "snps,dw-apb-timer"; | 365 | compatible = "snps,dw-apb-timer"; |
366 | reg = <0x2c78 0x14>; | 366 | reg = <0x2c78 0x14>; |
367 | clocks = <&chip CLKID_CFG>; | 367 | clocks = <&chip_clk CLKID_CFG>; |
368 | clock-names = "timer"; | 368 | clock-names = "timer"; |
369 | status = "disabled"; | 369 | status = "disabled"; |
370 | }; | 370 | }; |
@@ -372,7 +372,7 @@ | |||
372 | timer7: timer@2c8c { | 372 | timer7: timer@2c8c { |
373 | compatible = "snps,dw-apb-timer"; | 373 | compatible = "snps,dw-apb-timer"; |
374 | reg = <0x2c8c 0x14>; | 374 | reg = <0x2c8c 0x14>; |
375 | clocks = <&chip CLKID_CFG>; | 375 | clocks = <&chip_clk CLKID_CFG>; |
376 | clock-names = "timer"; | 376 | clock-names = "timer"; |
377 | status = "disabled"; | 377 | status = "disabled"; |
378 | }; | 378 | }; |
@@ -388,21 +388,33 @@ | |||
388 | }; | 388 | }; |
389 | 389 | ||
390 | chip: chip-control@ea0000 { | 390 | chip: chip-control@ea0000 { |
391 | compatible = "marvell,berlin2q-chip-ctrl"; | 391 | compatible = "simple-mfd", "syscon"; |
392 | #clock-cells = <1>; | ||
393 | #reset-cells = <2>; | ||
394 | reg = <0xea0000 0x400>, <0xdd0170 0x10>; | 392 | reg = <0xea0000 0x400>, <0xdd0170 0x10>; |
395 | clocks = <&refclk>; | ||
396 | clock-names = "refclk"; | ||
397 | 393 | ||
398 | twsi0_pmux: twsi0-pmux { | 394 | chip_clk: clock { |
399 | groups = "G6"; | 395 | compatible = "marvell,berlin2q-clk"; |
400 | function = "twsi0"; | 396 | #clock-cells = <1>; |
397 | clocks = <&refclk>; | ||
398 | clock-names = "refclk"; | ||
401 | }; | 399 | }; |
402 | 400 | ||
403 | twsi1_pmux: twsi1-pmux { | 401 | soc_pinctrl: pin-controller { |
404 | groups = "G7"; | 402 | compatible = "marvell,berlin2q-soc-pinctrl"; |
405 | function = "twsi1"; | 403 | |
404 | twsi0_pmux: twsi0-pmux { | ||
405 | groups = "G6"; | ||
406 | function = "twsi0"; | ||
407 | }; | ||
408 | |||
409 | twsi1_pmux: twsi1-pmux { | ||
410 | groups = "G7"; | ||
411 | function = "twsi1"; | ||
412 | }; | ||
413 | }; | ||
414 | |||
415 | chip_rst: reset { | ||
416 | compatible = "marvell,berlin2-reset"; | ||
417 | #reset-cells = <2>; | ||
406 | }; | 418 | }; |
407 | }; | 419 | }; |
408 | 420 | ||
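With the chip-control block re-described as "simple-mfd"/"syscon", the clock, pin-control and reset functions become child nodes (chip_clk, soc_pinctrl, chip_rst) sharing the parent's register window, and consumers now reference those children instead of the old monolithic &chip phandle. A minimal sketch of how such a child driver could reach the shared registers through the parent syscon regmap is shown below; the probe function and its error handling are illustrative assumptions, not the actual marvell,berlin2-reset implementation.

    #include <linux/err.h>
    #include <linux/mfd/syscon.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>
    #include <linux/regmap.h>

    /* Hypothetical child probe: the registers live in the parent syscon node. */
    static int berlin_reset_child_probe(struct platform_device *pdev)
    {
            struct device_node *parent = of_get_parent(pdev->dev.of_node);
            struct regmap *regmap = syscon_node_to_regmap(parent);

            of_node_put(parent);
            if (IS_ERR(regmap))
                    return PTR_ERR(regmap);

            /* ...register a reset controller that pokes regmap offsets like 0x104... */
            return 0;
    }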
@@ -410,7 +422,7 @@ | |||
410 | compatible = "marvell,berlin2q-ahci", "generic-ahci"; | 422 | compatible = "marvell,berlin2q-ahci", "generic-ahci"; |
411 | reg = <0xe90000 0x1000>; | 423 | reg = <0xe90000 0x1000>; |
412 | interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>; | 424 | interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>; |
413 | clocks = <&chip CLKID_SATA>; | 425 | clocks = <&chip_clk CLKID_SATA>; |
414 | #address-cells = <1>; | 426 | #address-cells = <1>; |
415 | #size-cells = <0>; | 427 | #size-cells = <0>; |
416 | 428 | ||
@@ -430,7 +442,7 @@ | |||
430 | sata_phy: phy@e900a0 { | 442 | sata_phy: phy@e900a0 { |
431 | compatible = "marvell,berlin2q-sata-phy"; | 443 | compatible = "marvell,berlin2q-sata-phy"; |
432 | reg = <0xe900a0 0x200>; | 444 | reg = <0xe900a0 0x200>; |
433 | clocks = <&chip CLKID_SATA>; | 445 | clocks = <&chip_clk CLKID_SATA>; |
434 | #address-cells = <1>; | 446 | #address-cells = <1>; |
435 | #size-cells = <0>; | 447 | #size-cells = <0>; |
436 | #phy-cells = <1>; | 448 | #phy-cells = <1>; |
@@ -449,7 +461,7 @@ | |||
449 | compatible = "chipidea,usb2"; | 461 | compatible = "chipidea,usb2"; |
450 | reg = <0xed0000 0x10000>; | 462 | reg = <0xed0000 0x10000>; |
451 | interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>; | 463 | interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>; |
452 | clocks = <&chip CLKID_USB0>; | 464 | clocks = <&chip_clk CLKID_USB0>; |
453 | phys = <&usb_phy0>; | 465 | phys = <&usb_phy0>; |
454 | phy-names = "usb-phy"; | 466 | phy-names = "usb-phy"; |
455 | status = "disabled"; | 467 | status = "disabled"; |
@@ -459,7 +471,7 @@ | |||
459 | compatible = "chipidea,usb2"; | 471 | compatible = "chipidea,usb2"; |
460 | reg = <0xee0000 0x10000>; | 472 | reg = <0xee0000 0x10000>; |
461 | interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; | 473 | interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; |
462 | clocks = <&chip CLKID_USB1>; | 474 | clocks = <&chip_clk CLKID_USB1>; |
463 | phys = <&usb_phy1>; | 475 | phys = <&usb_phy1>; |
464 | phy-names = "usb-phy"; | 476 | phy-names = "usb-phy"; |
465 | status = "disabled"; | 477 | status = "disabled"; |
@@ -554,27 +566,37 @@ | |||
554 | }; | 566 | }; |
555 | 567 | ||
556 | sysctrl: pin-controller@d000 { | 568 | sysctrl: pin-controller@d000 { |
557 | compatible = "marvell,berlin2q-system-ctrl"; | 569 | compatible = "simple-mfd", "syscon"; |
558 | reg = <0xd000 0x100>; | 570 | reg = <0xd000 0x100>; |
559 | 571 | ||
560 | uart0_pmux: uart0-pmux { | 572 | sys_pinctrl: pin-controller { |
561 | groups = "GSM12"; | 573 | compatible = "marvell,berlin2q-system-pinctrl"; |
562 | function = "uart0"; | ||
563 | }; | ||
564 | 574 | ||
565 | uart1_pmux: uart1-pmux { | 575 | uart0_pmux: uart0-pmux { |
566 | groups = "GSM14"; | 576 | groups = "GSM12"; |
567 | function = "uart1"; | 577 | function = "uart0"; |
568 | }; | 578 | }; |
579 | |||
580 | uart1_pmux: uart1-pmux { | ||
581 | groups = "GSM14"; | ||
582 | function = "uart1"; | ||
583 | }; | ||
584 | |||
585 | twsi2_pmux: twsi2-pmux { | ||
586 | groups = "GSM13"; | ||
587 | function = "twsi2"; | ||
588 | }; | ||
569 | 589 | ||
570 | twsi2_pmux: twsi2-pmux { | 590 | twsi3_pmux: twsi3-pmux { |
571 | groups = "GSM13"; | 591 | groups = "GSM14"; |
572 | function = "twsi2"; | 592 | function = "twsi3"; |
593 | }; | ||
573 | }; | 594 | }; |
574 | 595 | ||
575 | twsi3_pmux: twsi3-pmux { | 596 | adc: adc { |
576 | groups = "GSM14"; | 597 | compatible = "marvell,berlin2-adc"; |
577 | function = "twsi3"; | 598 | interrupts = <12>, <14>; |
599 | interrupt-names = "adc", "tsen"; | ||
578 | }; | 600 | }; |
579 | }; | 601 | }; |
580 | 602 | ||
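The new adc sub-node supplies its two interrupts by name ("adc" and "tsen"), so a driver can look them up with platform_get_irq_byname() rather than by index. The fragment below only illustrates that lookup; it is not the berlin2-adc driver itself.

    #include <linux/errno.h>
    #include <linux/platform_device.h>

    /* Illustrative only: resolve the named interrupts declared in the adc node. */
    static int berlin_adc_get_irqs(struct platform_device *pdev)
    {
            int adc_irq = platform_get_irq_byname(pdev, "adc");
            int tsen_irq = platform_get_irq_byname(pdev, "tsen");

            if (adc_irq < 0 || tsen_irq < 0)
                    return -ENODEV;

            /* devm_request_irq() for both would follow here. */
            return 0;
    }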
diff --git a/arch/arm/boot/dts/integrator.dtsi b/arch/arm/boot/dts/integrator.dtsi index 28e38f8c6b0f..3807d4f46ef7 100644 --- a/arch/arm/boot/dts/integrator.dtsi +++ b/arch/arm/boot/dts/integrator.dtsi | |||
@@ -6,7 +6,7 @@ | |||
6 | 6 | ||
7 | / { | 7 | / { |
8 | core-module@10000000 { | 8 | core-module@10000000 { |
9 | compatible = "arm,core-module-integrator", "syscon"; | 9 | compatible = "arm,core-module-integrator", "syscon", "simple-mfd"; |
10 | reg = <0x10000000 0x200>; | 10 | reg = <0x10000000 0x200>; |
11 | 11 | ||
12 | /* Use core module LED to indicate CPU load */ | 12 | /* Use core module LED to indicate CPU load */ |
@@ -95,7 +95,7 @@ | |||
95 | 95 | ||
96 | syscon { | 96 | syscon { |
97 | /* Debug registers mapped as syscon */ | 97 | /* Debug registers mapped as syscon */ |
98 | compatible = "syscon"; | 98 | compatible = "syscon", "simple-mfd"; |
99 | reg = <0x1a000000 0x10>; | 99 | reg = <0x1a000000 0x10>; |
100 | 100 | ||
101 | led@04.0 { | 101 | led@04.0 { |
diff --git a/arch/arm/mach-berlin/Kconfig b/arch/arm/mach-berlin/Kconfig index 3e40a947f3ea..742d53a5f7f9 100644 --- a/arch/arm/mach-berlin/Kconfig +++ b/arch/arm/mach-berlin/Kconfig | |||
@@ -6,6 +6,7 @@ menuconfig ARCH_BERLIN | |||
6 | select DW_APB_ICTL | 6 | select DW_APB_ICTL |
7 | select DW_APB_TIMER_OF | 7 | select DW_APB_TIMER_OF |
8 | select GENERIC_IRQ_CHIP | 8 | select GENERIC_IRQ_CHIP |
9 | select MFD_SYSCON | ||
9 | select PINCTRL | 10 | select PINCTRL |
10 | 11 | ||
11 | if ARCH_BERLIN | 12 | if ARCH_BERLIN |
diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi index fde0cfad09de..021e0f40f419 100644 --- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi +++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi | |||
@@ -138,6 +138,74 @@ | |||
138 | clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3"; | 138 | clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3"; |
139 | }; | 139 | }; |
140 | 140 | ||
141 | apbregs@010000 { | ||
142 | compatible = "syscon", "simple-mfd"; | ||
143 | reg = <0x010000 0x1000>; | ||
144 | |||
145 | led@08.0 { | ||
146 | compatible = "register-bit-led"; | ||
147 | offset = <0x08>; | ||
148 | mask = <0x01>; | ||
149 | label = "vexpress:0"; | ||
150 | linux,default-trigger = "heartbeat"; | ||
151 | default-state = "on"; | ||
152 | }; | ||
153 | led@08.1 { | ||
154 | compatible = "register-bit-led"; | ||
155 | offset = <0x08>; | ||
156 | mask = <0x02>; | ||
157 | label = "vexpress:1"; | ||
158 | linux,default-trigger = "mmc0"; | ||
159 | default-state = "off"; | ||
160 | }; | ||
161 | led@08.2 { | ||
162 | compatible = "register-bit-led"; | ||
163 | offset = <0x08>; | ||
164 | mask = <0x04>; | ||
165 | label = "vexpress:2"; | ||
166 | linux,default-trigger = "cpu0"; | ||
167 | default-state = "off"; | ||
168 | }; | ||
169 | led@08.3 { | ||
170 | compatible = "register-bit-led"; | ||
171 | offset = <0x08>; | ||
172 | mask = <0x08>; | ||
173 | label = "vexpress:3"; | ||
174 | linux,default-trigger = "cpu1"; | ||
175 | default-state = "off"; | ||
176 | }; | ||
177 | led@08.4 { | ||
178 | compatible = "register-bit-led"; | ||
179 | offset = <0x08>; | ||
180 | mask = <0x10>; | ||
181 | label = "vexpress:4"; | ||
182 | linux,default-trigger = "cpu2"; | ||
183 | default-state = "off"; | ||
184 | }; | ||
185 | led@08.5 { | ||
186 | compatible = "register-bit-led"; | ||
187 | offset = <0x08>; | ||
188 | mask = <0x20>; | ||
189 | label = "vexpress:5"; | ||
190 | linux,default-trigger = "cpu3"; | ||
191 | default-state = "off"; | ||
192 | }; | ||
193 | led@08.6 { | ||
194 | compatible = "register-bit-led"; | ||
195 | offset = <0x08>; | ||
196 | mask = <0x40>; | ||
197 | label = "vexpress:6"; | ||
198 | default-state = "off"; | ||
199 | }; | ||
200 | led@08.7 { | ||
201 | compatible = "register-bit-led"; | ||
202 | offset = <0x08>; | ||
203 | mask = <0x80>; | ||
204 | label = "vexpress:7"; | ||
205 | default-state = "off"; | ||
206 | }; | ||
207 | }; | ||
208 | |||
141 | mmci@050000 { | 209 | mmci@050000 { |
142 | compatible = "arm,pl180", "arm,primecell"; | 210 | compatible = "arm,pl180", "arm,primecell"; |
143 | reg = <0x050000 0x1000>; | 211 | reg = <0x050000 0x1000>; |
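Each led@08.N child maps one LED onto a single bit of the motherboard register at offset 0x08 inside the apbregs syscon block; the syscon LED driver (enabled below via CONFIG_LEDS_SYSCON) drives it by read-modify-writing that bit through the parent regmap. As a rough sketch, assuming "map" is the apbregs syscon regmap, lighting "vexpress:0" (offset 0x08, mask 0x01) comes down to:

    #include <linux/regmap.h>
    #include <linux/types.h>

    /* Set or clear bit 0x01 at offset 0x08 - the "vexpress:0" LED. */
    static int vexpress_led0_set(struct regmap *map, bool on)
    {
            return regmap_update_bits(map, 0x08, 0x01, on ? 0x01 : 0x00);
    }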
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 1d293ea16f46..ab66f1600cec 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig | |||
@@ -139,6 +139,12 @@ CONFIG_MMC_ARMMMCI=y | |||
139 | CONFIG_MMC_SDHCI=y | 139 | CONFIG_MMC_SDHCI=y |
140 | CONFIG_MMC_SDHCI_PLTFM=y | 140 | CONFIG_MMC_SDHCI_PLTFM=y |
141 | CONFIG_MMC_SPI=y | 141 | CONFIG_MMC_SPI=y |
142 | CONFIG_NEW_LEDS=y | ||
143 | CONFIG_LEDS_CLASS=y | ||
144 | CONFIG_LEDS_SYSCON=y | ||
145 | CONFIG_LEDS_TRIGGERS=y | ||
146 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y | ||
147 | CONFIG_LEDS_TRIGGER_CPU=y | ||
142 | CONFIG_RTC_CLASS=y | 148 | CONFIG_RTC_CLASS=y |
143 | CONFIG_RTC_DRV_EFI=y | 149 | CONFIG_RTC_DRV_EFI=y |
144 | CONFIG_RTC_DRV_XGENE=y | 150 | CONFIG_RTC_DRV_XGENE=y |
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index a1d4af6df3f5..1a82f3a17681 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig | |||
@@ -7,21 +7,24 @@ menu "Bus devices" | |||
7 | config ARM_CCI | 7 | config ARM_CCI |
8 | bool | 8 | bool |
9 | 9 | ||
10 | config ARM_CCI_PMU | ||
11 | bool | ||
12 | select ARM_CCI | ||
13 | |||
10 | config ARM_CCI400_COMMON | 14 | config ARM_CCI400_COMMON |
11 | bool | 15 | bool |
12 | select ARM_CCI | 16 | select ARM_CCI |
13 | 17 | ||
14 | config ARM_CCI400_PMU | 18 | config ARM_CCI400_PMU |
15 | bool "ARM CCI400 PMU support" | 19 | bool "ARM CCI400 PMU support" |
16 | default y | 20 | depends on (ARM && CPU_V7) || ARM64 |
17 | depends on ARM || ARM64 | 21 | depends on PERF_EVENTS |
18 | depends on HW_PERF_EVENTS | ||
19 | select ARM_CCI400_COMMON | 22 | select ARM_CCI400_COMMON |
23 | select ARM_CCI_PMU | ||
20 | help | 24 | help |
21 | Support for PMU events monitoring on the ARM CCI cache coherent | 25 | Support for PMU events monitoring on the ARM CCI-400 (cache coherent |
22 | interconnect. | 26 | interconnect). CCI-400 supports counting events related to the |
23 | 27 | connected slave/master interfaces. | |
24 | If unsure, say Y | ||
25 | 28 | ||
26 | config ARM_CCI400_PORT_CTRL | 29 | config ARM_CCI400_PORT_CTRL |
27 | bool | 30 | bool |
@@ -31,6 +34,20 @@ config ARM_CCI400_PORT_CTRL | |||
31 | Low level power management driver for CCI400 cache coherent | 34 | Low level power management driver for CCI400 cache coherent |
32 | interconnect for ARM platforms. | 35 | interconnect for ARM platforms. |
33 | 36 | ||
37 | config ARM_CCI500_PMU | ||
38 | bool "ARM CCI500 PMU support" | ||
39 | default y | ||
40 | depends on (ARM && CPU_V7) || ARM64 | ||
41 | depends on PERF_EVENTS | ||
42 | select ARM_CCI_PMU | ||
43 | help | ||
44 | Support for PMU events monitoring on the ARM CCI-500 cache coherent | ||
45 | interconnect. CCI-500 provides 8 independent event counters, which | ||
46 | can count events pertaining to the slave/master interfaces as well | ||
47 | as the internal events to the CCI. | ||
48 | |||
49 | If unsure, say Y | ||
50 | |||
34 | config ARM_CCN | 51 | config ARM_CCN |
35 | bool "ARM CCN driver support" | 52 | bool "ARM CCN driver support" |
36 | depends on ARM || ARM64 | 53 | depends on ARM || ARM64 |
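The CCI-500 PMU is driven through perf: the event code sits in config bits [4:0] and the source interface in the bits above it, matching the format strings ("config:0-4", "config:5-8") added to arm-cci.c below. A user-space helper that packs the two fields could look like the sketch below; the field layout is taken from this patch, while the helper itself is not part of any existing tool.

    #include <stdint.h>

    /* CCI-500 perf config: bits [4:0] = event code, bits [8:5] = source interface. */
    static inline uint64_t cci500_event_config(uint64_t source, uint64_t event)
    {
            return ((source & 0xf) << 5) | (event & 0x1f);
    }

    /* Example: slave interface S1 (0x1), event si_rrq_hs_arvalid (0x0) -> config = 0x20. */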
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index 5340604b23a4..577cc4bf6a9d 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c | |||
@@ -52,12 +52,15 @@ static const struct of_device_id arm_cci_matches[] = { | |||
52 | #ifdef CONFIG_ARM_CCI400_COMMON | 52 | #ifdef CONFIG_ARM_CCI400_COMMON |
53 | {.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA }, | 53 | {.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA }, |
54 | #endif | 54 | #endif |
55 | #ifdef CONFIG_ARM_CCI500_PMU | ||
56 | { .compatible = "arm,cci-500", }, | ||
57 | #endif | ||
55 | {}, | 58 | {}, |
56 | }; | 59 | }; |
57 | 60 | ||
58 | #ifdef CONFIG_ARM_CCI400_PMU | 61 | #ifdef CONFIG_ARM_CCI_PMU |
59 | 62 | ||
60 | #define DRIVER_NAME "CCI-400" | 63 | #define DRIVER_NAME "ARM-CCI" |
61 | #define DRIVER_NAME_PMU DRIVER_NAME " PMU" | 64 | #define DRIVER_NAME_PMU DRIVER_NAME " PMU" |
62 | 65 | ||
63 | #define CCI_PMCR 0x0100 | 66 | #define CCI_PMCR 0x0100 |
@@ -77,20 +80,21 @@ static const struct of_device_id arm_cci_matches[] = { | |||
77 | 80 | ||
78 | #define CCI_PMU_OVRFLW_FLAG 1 | 81 | #define CCI_PMU_OVRFLW_FLAG 1 |
79 | 82 | ||
80 | #define CCI_PMU_CNTR_BASE(idx) ((idx) * SZ_4K) | 83 | #define CCI_PMU_CNTR_SIZE(model) ((model)->cntr_size) |
81 | 84 | #define CCI_PMU_CNTR_BASE(model, idx) ((idx) * CCI_PMU_CNTR_SIZE(model)) | |
82 | #define CCI_PMU_CNTR_MASK ((1ULL << 32) -1) | 85 | #define CCI_PMU_CNTR_MASK ((1ULL << 32) -1) |
86 | #define CCI_PMU_CNTR_LAST(cci_pmu) (cci_pmu->num_cntrs - 1) | ||
83 | 87 | ||
84 | #define CCI_PMU_EVENT_MASK 0xffUL | 88 | #define CCI_PMU_MAX_HW_CNTRS(model) \ |
85 | #define CCI_PMU_EVENT_SOURCE(event) ((event >> 5) & 0x7) | 89 | ((model)->num_hw_cntrs + (model)->fixed_hw_cntrs) |
86 | #define CCI_PMU_EVENT_CODE(event) (event & 0x1f) | ||
87 | |||
88 | #define CCI_PMU_MAX_HW_EVENTS 5 /* CCI PMU has 4 counters + 1 cycle counter */ | ||
89 | 90 | ||
90 | /* Types of interfaces that can generate events */ | 91 | /* Types of interfaces that can generate events */ |
91 | enum { | 92 | enum { |
92 | CCI_IF_SLAVE, | 93 | CCI_IF_SLAVE, |
93 | CCI_IF_MASTER, | 94 | CCI_IF_MASTER, |
95 | #ifdef CONFIG_ARM_CCI500_PMU | ||
96 | CCI_IF_GLOBAL, | ||
97 | #endif | ||
94 | CCI_IF_MAX, | 98 | CCI_IF_MAX, |
95 | }; | 99 | }; |
96 | 100 | ||
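The counter register window is no longer hard-wired to 4 KiB per counter: CCI_PMU_CNTR_BASE() now scales the index by the model's cntr_size, and CCI_PMU_MAX_HW_CNTRS() is simply the sum of the fixed and programmable counters. The worked example below uses made-up model values, not the driver's real tables.

    /* Hypothetical model: 1 fixed (cycle) counter, 4 programmable, 4 KiB window each. */
    struct ex_model { unsigned int fixed_hw_cntrs, num_hw_cntrs, cntr_size; };

    /* Same arithmetic as CCI_PMU_CNTR_BASE()/CCI_PMU_MAX_HW_CNTRS() in this hunk. */
    static unsigned int ex_cntr_base(const struct ex_model *m, int idx)
    {
            return idx * m->cntr_size;                    /* idx 2, 0x1000 -> 0x2000 */
    }

    static unsigned int ex_max_hw_cntrs(const struct ex_model *m)
    {
            return m->num_hw_cntrs + m->fixed_hw_cntrs;   /* 4 + 1 -> 5 */
    }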
@@ -100,14 +104,30 @@ struct event_range { | |||
100 | }; | 104 | }; |
101 | 105 | ||
102 | struct cci_pmu_hw_events { | 106 | struct cci_pmu_hw_events { |
103 | struct perf_event *events[CCI_PMU_MAX_HW_EVENTS]; | 107 | struct perf_event **events; |
104 | unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)]; | 108 | unsigned long *used_mask; |
105 | raw_spinlock_t pmu_lock; | 109 | raw_spinlock_t pmu_lock; |
106 | }; | 110 | }; |
107 | 111 | ||
112 | struct cci_pmu; | ||
113 | /* | ||
114 | * struct cci_pmu_model: | ||
115 | * @fixed_hw_cntrs - Number of fixed event counters | ||
116 | * @num_hw_cntrs - Maximum number of programmable event counters | ||
117 | * @cntr_size - Size of an event counter mapping | ||
118 | */ | ||
108 | struct cci_pmu_model { | 119 | struct cci_pmu_model { |
109 | char *name; | 120 | char *name; |
121 | u32 fixed_hw_cntrs; | ||
122 | u32 num_hw_cntrs; | ||
123 | u32 cntr_size; | ||
124 | u64 nformat_attrs; | ||
125 | u64 nevent_attrs; | ||
126 | struct dev_ext_attribute *format_attrs; | ||
127 | struct dev_ext_attribute *event_attrs; | ||
110 | struct event_range event_ranges[CCI_IF_MAX]; | 128 | struct event_range event_ranges[CCI_IF_MAX]; |
129 | int (*validate_hw_event)(struct cci_pmu *, unsigned long); | ||
130 | int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long); | ||
111 | }; | 131 | }; |
112 | 132 | ||
113 | static struct cci_pmu_model cci_pmu_models[]; | 133 | static struct cci_pmu_model cci_pmu_models[]; |
@@ -116,33 +136,59 @@ struct cci_pmu { | |||
116 | void __iomem *base; | 136 | void __iomem *base; |
117 | struct pmu pmu; | 137 | struct pmu pmu; |
118 | int nr_irqs; | 138 | int nr_irqs; |
119 | int irqs[CCI_PMU_MAX_HW_EVENTS]; | 139 | int *irqs; |
120 | unsigned long active_irqs; | 140 | unsigned long active_irqs; |
121 | const struct cci_pmu_model *model; | 141 | const struct cci_pmu_model *model; |
122 | struct cci_pmu_hw_events hw_events; | 142 | struct cci_pmu_hw_events hw_events; |
123 | struct platform_device *plat_device; | 143 | struct platform_device *plat_device; |
124 | int num_events; | 144 | int num_cntrs; |
125 | atomic_t active_events; | 145 | atomic_t active_events; |
126 | struct mutex reserve_mutex; | 146 | struct mutex reserve_mutex; |
147 | struct notifier_block cpu_nb; | ||
127 | cpumask_t cpus; | 148 | cpumask_t cpus; |
128 | }; | 149 | }; |
129 | static struct cci_pmu *pmu; | ||
130 | 150 | ||
131 | #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) | 151 | #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) |
132 | 152 | ||
153 | enum cci_models { | ||
154 | #ifdef CONFIG_ARM_CCI400_PMU | ||
155 | CCI400_R0, | ||
156 | CCI400_R1, | ||
157 | #endif | ||
158 | #ifdef CONFIG_ARM_CCI500_PMU | ||
159 | CCI500_R0, | ||
160 | #endif | ||
161 | CCI_MODEL_MAX | ||
162 | }; | ||
163 | |||
164 | static ssize_t cci_pmu_format_show(struct device *dev, | ||
165 | struct device_attribute *attr, char *buf); | ||
166 | static ssize_t cci_pmu_event_show(struct device *dev, | ||
167 | struct device_attribute *attr, char *buf); | ||
168 | |||
169 | #define CCI_EXT_ATTR_ENTRY(_name, _func, _config) \ | ||
170 | { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config } | ||
171 | |||
172 | #define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \ | ||
173 | CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config) | ||
174 | #define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \ | ||
175 | CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config) | ||
176 | |||
177 | /* CCI400 PMU Specific definitions */ | ||
178 | |||
179 | #ifdef CONFIG_ARM_CCI400_PMU | ||
180 | |||
133 | /* Port ids */ | 181 | /* Port ids */ |
134 | #define CCI_PORT_S0 0 | 182 | #define CCI400_PORT_S0 0 |
135 | #define CCI_PORT_S1 1 | 183 | #define CCI400_PORT_S1 1 |
136 | #define CCI_PORT_S2 2 | 184 | #define CCI400_PORT_S2 2 |
137 | #define CCI_PORT_S3 3 | 185 | #define CCI400_PORT_S3 3 |
138 | #define CCI_PORT_S4 4 | 186 | #define CCI400_PORT_S4 4 |
139 | #define CCI_PORT_M0 5 | 187 | #define CCI400_PORT_M0 5 |
140 | #define CCI_PORT_M1 6 | 188 | #define CCI400_PORT_M1 6 |
141 | #define CCI_PORT_M2 7 | 189 | #define CCI400_PORT_M2 7 |
142 | 190 | ||
143 | #define CCI_REV_R0 0 | 191 | #define CCI400_R1_PX 5 |
144 | #define CCI_REV_R1 1 | ||
145 | #define CCI_REV_R1_PX 5 | ||
146 | 192 | ||
147 | /* | 193 | /* |
148 | * Instead of an event id to monitor CCI cycles, a dedicated counter is | 194 | * Instead of an event id to monitor CCI cycles, a dedicated counter is |
@@ -150,12 +196,11 @@ static struct cci_pmu *pmu; | |||
150 | * make use of this event in hardware. | 196 | * make use of this event in hardware. |
151 | */ | 197 | */ |
152 | enum cci400_perf_events { | 198 | enum cci400_perf_events { |
153 | CCI_PMU_CYCLES = 0xff | 199 | CCI400_PMU_CYCLES = 0xff |
154 | }; | 200 | }; |
155 | 201 | ||
156 | #define CCI_PMU_CYCLE_CNTR_IDX 0 | 202 | #define CCI400_PMU_CYCLE_CNTR_IDX 0 |
157 | #define CCI_PMU_CNTR0_IDX 1 | 203 | #define CCI400_PMU_CNTR0_IDX 1 |
158 | #define CCI_PMU_CNTR_LAST(cci_pmu) (CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1) | ||
159 | 204 | ||
160 | /* | 205 | /* |
161 | * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8 | 206 | * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8 |
@@ -169,37 +214,173 @@ enum cci400_perf_events { | |||
169 | * the different revisions and are used to validate the event to be monitored. | 214 | * the different revisions and are used to validate the event to be monitored. |
170 | */ | 215 | */ |
171 | 216 | ||
172 | #define CCI_REV_R0_SLAVE_PORT_MIN_EV 0x00 | 217 | #define CCI400_PMU_EVENT_MASK 0xffUL |
173 | #define CCI_REV_R0_SLAVE_PORT_MAX_EV 0x13 | 218 | #define CCI400_PMU_EVENT_SOURCE_SHIFT 5 |
174 | #define CCI_REV_R0_MASTER_PORT_MIN_EV 0x14 | 219 | #define CCI400_PMU_EVENT_SOURCE_MASK 0x7 |
175 | #define CCI_REV_R0_MASTER_PORT_MAX_EV 0x1a | 220 | #define CCI400_PMU_EVENT_CODE_SHIFT 0 |
221 | #define CCI400_PMU_EVENT_CODE_MASK 0x1f | ||
222 | #define CCI400_PMU_EVENT_SOURCE(event) \ | ||
223 | ((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \ | ||
224 | CCI400_PMU_EVENT_SOURCE_MASK) | ||
225 | #define CCI400_PMU_EVENT_CODE(event) \ | ||
226 | ((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK) | ||
227 | |||
228 | #define CCI400_R0_SLAVE_PORT_MIN_EV 0x00 | ||
229 | #define CCI400_R0_SLAVE_PORT_MAX_EV 0x13 | ||
230 | #define CCI400_R0_MASTER_PORT_MIN_EV 0x14 | ||
231 | #define CCI400_R0_MASTER_PORT_MAX_EV 0x1a | ||
232 | |||
233 | #define CCI400_R1_SLAVE_PORT_MIN_EV 0x00 | ||
234 | #define CCI400_R1_SLAVE_PORT_MAX_EV 0x14 | ||
235 | #define CCI400_R1_MASTER_PORT_MIN_EV 0x00 | ||
236 | #define CCI400_R1_MASTER_PORT_MAX_EV 0x11 | ||
237 | |||
238 | #define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \ | ||
239 | CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \ | ||
240 | (unsigned long)_config) | ||
241 | |||
242 | static ssize_t cci400_pmu_cycle_event_show(struct device *dev, | ||
243 | struct device_attribute *attr, char *buf); | ||
244 | |||
245 | static struct dev_ext_attribute cci400_pmu_format_attrs[] = { | ||
246 | CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"), | ||
247 | CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"), | ||
248 | }; | ||
249 | |||
250 | static struct dev_ext_attribute cci400_r0_pmu_event_attrs[] = { | ||
251 | /* Slave events */ | ||
252 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0), | ||
253 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01), | ||
254 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2), | ||
255 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3), | ||
256 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4), | ||
257 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5), | ||
258 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6), | ||
259 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), | ||
260 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8), | ||
261 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9), | ||
262 | CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA), | ||
263 | CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB), | ||
264 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC), | ||
265 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD), | ||
266 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE), | ||
267 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF), | ||
268 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10), | ||
269 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11), | ||
270 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12), | ||
271 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13), | ||
272 | /* Master events */ | ||
273 | CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14), | ||
274 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15), | ||
275 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16), | ||
276 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17), | ||
277 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18), | ||
278 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19), | ||
279 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A), | ||
280 | /* Special event for cycles counter */ | ||
281 | CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff), | ||
282 | }; | ||
283 | |||
284 | static struct dev_ext_attribute cci400_r1_pmu_event_attrs[] = { | ||
285 | /* Slave events */ | ||
286 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0), | ||
287 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01), | ||
288 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2), | ||
289 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3), | ||
290 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4), | ||
291 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5), | ||
292 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6), | ||
293 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), | ||
294 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8), | ||
295 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9), | ||
296 | CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA), | ||
297 | CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB), | ||
298 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC), | ||
299 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD), | ||
300 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE), | ||
301 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF), | ||
302 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10), | ||
303 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11), | ||
304 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12), | ||
305 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13), | ||
306 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14), | ||
307 | /* Master events */ | ||
308 | CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0), | ||
309 | CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1), | ||
310 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2), | ||
311 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3), | ||
312 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4), | ||
313 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5), | ||
314 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6), | ||
315 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7), | ||
316 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8), | ||
317 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9), | ||
318 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA), | ||
319 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB), | ||
320 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC), | ||
321 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD), | ||
322 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE), | ||
323 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF), | ||
324 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10), | ||
325 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11), | ||
326 | /* Special event for cycles counter */ | ||
327 | CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff), | ||
328 | }; | ||
176 | 329 | ||
177 | #define CCI_REV_R1_SLAVE_PORT_MIN_EV 0x00 | 330 | static ssize_t cci400_pmu_cycle_event_show(struct device *dev, |
178 | #define CCI_REV_R1_SLAVE_PORT_MAX_EV 0x14 | 331 | struct device_attribute *attr, char *buf) |
179 | #define CCI_REV_R1_MASTER_PORT_MIN_EV 0x00 | 332 | { |
180 | #define CCI_REV_R1_MASTER_PORT_MAX_EV 0x11 | 333 | struct dev_ext_attribute *eattr = container_of(attr, |
334 | struct dev_ext_attribute, attr); | ||
335 | return snprintf(buf, PAGE_SIZE, "config=0x%lx\n", (unsigned long)eattr->var); | ||
336 | } | ||
181 | 337 | ||
182 | static int pmu_validate_hw_event(unsigned long hw_event) | 338 | static int cci400_get_event_idx(struct cci_pmu *cci_pmu, |
339 | struct cci_pmu_hw_events *hw, | ||
340 | unsigned long cci_event) | ||
183 | { | 341 | { |
184 | u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event); | 342 | int idx; |
185 | u8 ev_code = CCI_PMU_EVENT_CODE(hw_event); | 343 | |
344 | /* cycles event idx is fixed */ | ||
345 | if (cci_event == CCI400_PMU_CYCLES) { | ||
346 | if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask)) | ||
347 | return -EAGAIN; | ||
348 | |||
349 | return CCI400_PMU_CYCLE_CNTR_IDX; | ||
350 | } | ||
351 | |||
352 | for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx) | ||
353 | if (!test_and_set_bit(idx, hw->used_mask)) | ||
354 | return idx; | ||
355 | |||
356 | /* No counters available */ | ||
357 | return -EAGAIN; | ||
358 | } | ||
359 | |||
360 | static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event) | ||
361 | { | ||
362 | u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event); | ||
363 | u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event); | ||
186 | int if_type; | 364 | int if_type; |
187 | 365 | ||
188 | if (hw_event & ~CCI_PMU_EVENT_MASK) | 366 | if (hw_event & ~CCI400_PMU_EVENT_MASK) |
189 | return -ENOENT; | 367 | return -ENOENT; |
190 | 368 | ||
369 | if (hw_event == CCI400_PMU_CYCLES) | ||
370 | return hw_event; | ||
371 | |||
191 | switch (ev_source) { | 372 | switch (ev_source) { |
192 | case CCI_PORT_S0: | 373 | case CCI400_PORT_S0: |
193 | case CCI_PORT_S1: | 374 | case CCI400_PORT_S1: |
194 | case CCI_PORT_S2: | 375 | case CCI400_PORT_S2: |
195 | case CCI_PORT_S3: | 376 | case CCI400_PORT_S3: |
196 | case CCI_PORT_S4: | 377 | case CCI400_PORT_S4: |
197 | /* Slave Interface */ | 378 | /* Slave Interface */ |
198 | if_type = CCI_IF_SLAVE; | 379 | if_type = CCI_IF_SLAVE; |
199 | break; | 380 | break; |
200 | case CCI_PORT_M0: | 381 | case CCI400_PORT_M0: |
201 | case CCI_PORT_M1: | 382 | case CCI400_PORT_M1: |
202 | case CCI_PORT_M2: | 383 | case CCI400_PORT_M2: |
203 | /* Master Interface */ | 384 | /* Master Interface */ |
204 | if_type = CCI_IF_MASTER; | 385 | if_type = CCI_IF_MASTER; |
205 | break; | 386 | break; |
@@ -207,87 +388,291 @@ static int pmu_validate_hw_event(unsigned long hw_event) | |||
207 | return -ENOENT; | 388 | return -ENOENT; |
208 | } | 389 | } |
209 | 390 | ||
210 | if (ev_code >= pmu->model->event_ranges[if_type].min && | 391 | if (ev_code >= cci_pmu->model->event_ranges[if_type].min && |
211 | ev_code <= pmu->model->event_ranges[if_type].max) | 392 | ev_code <= cci_pmu->model->event_ranges[if_type].max) |
212 | return hw_event; | 393 | return hw_event; |
213 | 394 | ||
214 | return -ENOENT; | 395 | return -ENOENT; |
215 | } | 396 | } |
216 | 397 | ||
217 | static int probe_cci_revision(void) | 398 | static int probe_cci400_revision(void) |
218 | { | 399 | { |
219 | int rev; | 400 | int rev; |
220 | rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK; | 401 | rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK; |
221 | rev >>= CCI_PID2_REV_SHIFT; | 402 | rev >>= CCI_PID2_REV_SHIFT; |
222 | 403 | ||
223 | if (rev < CCI_REV_R1_PX) | 404 | if (rev < CCI400_R1_PX) |
224 | return CCI_REV_R0; | 405 | return CCI400_R0; |
225 | else | 406 | else |
226 | return CCI_REV_R1; | 407 | return CCI400_R1; |
227 | } | 408 | } |
228 | 409 | ||
229 | static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev) | 410 | static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev) |
230 | { | 411 | { |
231 | if (platform_has_secure_cci_access()) | 412 | if (platform_has_secure_cci_access()) |
232 | return &cci_pmu_models[probe_cci_revision()]; | 413 | return &cci_pmu_models[probe_cci400_revision()]; |
233 | return NULL; | 414 | return NULL; |
234 | } | 415 | } |
416 | #else /* !CONFIG_ARM_CCI400_PMU */ | ||
417 | static inline struct cci_pmu_model *probe_cci_model(struct platform_device *pdev) | ||
418 | { | ||
419 | return NULL; | ||
420 | } | ||
421 | #endif /* CONFIG_ARM_CCI400_PMU */ | ||
422 | |||
423 | #ifdef CONFIG_ARM_CCI500_PMU | ||
424 | |||
425 | /* | ||
426 | * CCI500 provides 8 independent event counters that can count | ||
427 | * any of the events available. | ||
428 | * | ||
429 | * CCI500 PMU event id is a 9-bit value made of two parts. | ||
430 | * bits [8:5] - Source for the event | ||
431 | * 0x0-0x6 - Slave interfaces | ||
432 | * 0x8-0xD - Master interfaces | ||
433 | * 0xf - Global Events | ||
434 | * 0x7,0xe - Reserved | ||
435 | * | ||
436 | * bits [4:0] - Event code (specific to type of interface) | ||
437 | */ | ||
438 | |||
439 | /* Port ids */ | ||
440 | #define CCI500_PORT_S0 0x0 | ||
441 | #define CCI500_PORT_S1 0x1 | ||
442 | #define CCI500_PORT_S2 0x2 | ||
443 | #define CCI500_PORT_S3 0x3 | ||
444 | #define CCI500_PORT_S4 0x4 | ||
445 | #define CCI500_PORT_S5 0x5 | ||
446 | #define CCI500_PORT_S6 0x6 | ||
447 | |||
448 | #define CCI500_PORT_M0 0x8 | ||
449 | #define CCI500_PORT_M1 0x9 | ||
450 | #define CCI500_PORT_M2 0xa | ||
451 | #define CCI500_PORT_M3 0xb | ||
452 | #define CCI500_PORT_M4 0xc | ||
453 | #define CCI500_PORT_M5 0xd | ||
454 | |||
455 | #define CCI500_PORT_GLOBAL 0xf | ||
456 | |||
457 | #define CCI500_PMU_EVENT_MASK 0x1ffUL | ||
458 | #define CCI500_PMU_EVENT_SOURCE_SHIFT 0x5 | ||
459 | #define CCI500_PMU_EVENT_SOURCE_MASK 0xf | ||
460 | #define CCI500_PMU_EVENT_CODE_SHIFT 0x0 | ||
461 | #define CCI500_PMU_EVENT_CODE_MASK 0x1f | ||
462 | |||
463 | #define CCI500_PMU_EVENT_SOURCE(event) \ | ||
464 | ((event >> CCI500_PMU_EVENT_SOURCE_SHIFT) & CCI500_PMU_EVENT_SOURCE_MASK) | ||
465 | #define CCI500_PMU_EVENT_CODE(event) \ | ||
466 | ((event >> CCI500_PMU_EVENT_CODE_SHIFT) & CCI500_PMU_EVENT_CODE_MASK) | ||
467 | |||
468 | #define CCI500_SLAVE_PORT_MIN_EV 0x00 | ||
469 | #define CCI500_SLAVE_PORT_MAX_EV 0x1f | ||
470 | #define CCI500_MASTER_PORT_MIN_EV 0x00 | ||
471 | #define CCI500_MASTER_PORT_MAX_EV 0x06 | ||
472 | #define CCI500_GLOBAL_PORT_MIN_EV 0x00 | ||
473 | #define CCI500_GLOBAL_PORT_MAX_EV 0x0f | ||
474 | |||
475 | |||
476 | #define CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \ | ||
477 | CCI_EXT_ATTR_ENTRY(_name, cci500_pmu_global_event_show, \ | ||
478 | (unsigned long) _config) | ||
479 | |||
480 | static ssize_t cci500_pmu_global_event_show(struct device *dev, | ||
481 | struct device_attribute *attr, char *buf); | ||
482 | |||
483 | static struct dev_ext_attribute cci500_pmu_format_attrs[] = { | ||
484 | CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"), | ||
485 | CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"), | ||
486 | }; | ||
487 | |||
488 | static struct dev_ext_attribute cci500_pmu_event_attrs[] = { | ||
489 | /* Slave events */ | ||
490 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0), | ||
491 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1), | ||
492 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable, 0x2), | ||
493 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc, 0x3), | ||
494 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc, 0x4), | ||
495 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate, 0x5), | ||
496 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint, 0x6), | ||
497 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), | ||
498 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval, 0x8), | ||
499 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop, 0x9), | ||
500 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid, 0xA), | ||
501 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev, 0xB), | ||
502 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable, 0xC), | ||
503 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb, 0xD), | ||
504 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu, 0xE), | ||
505 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique, 0xF), | ||
506 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict, 0x10), | ||
507 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict, 0x11), | ||
508 | CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat, 0x12), | ||
509 | CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid, 0x13), | ||
510 | CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read, 0x14), | ||
511 | CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean, 0x15), | ||
512 | CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low, 0x16), | ||
513 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid, 0x17), | ||
514 | CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall, 0x18), | ||
515 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall, 0x19), | ||
516 | CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall, 0x1A), | ||
517 | CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall, 0x1B), | ||
518 | CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall, 0x1C), | ||
519 | CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall, 0x1D), | ||
520 | CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit, 0x1E), | ||
521 | CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit, 0x1F), | ||
522 | |||
523 | /* Master events */ | ||
524 | CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any, 0x0), | ||
525 | CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any, 0x1), | ||
526 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall, 0x2), | ||
527 | CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall, 0x3), | ||
528 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall, 0x4), | ||
529 | CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall, 0x5), | ||
530 | CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6), | ||
531 | |||
532 | /* Global events */ | ||
533 | CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0), | ||
534 | CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1), | ||
535 | CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2), | ||
536 | CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3), | ||
537 | CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4), | ||
538 | CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5), | ||
539 | CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6), | ||
540 | CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7), | ||
541 | CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8), | ||
542 | CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9), | ||
543 | CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA), | ||
544 | CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB), | ||
545 | CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC), | ||
546 | CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD), | ||
547 | CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snopp_rq_stall_tt_full, 0xE), | ||
548 | CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF), | ||
549 | }; | ||
550 | |||
551 | static ssize_t cci500_pmu_global_event_show(struct device *dev, | ||
552 | struct device_attribute *attr, char *buf) | ||
553 | { | ||
554 | struct dev_ext_attribute *eattr = container_of(attr, | ||
555 | struct dev_ext_attribute, attr); | ||
556 | /* Global events have single fixed source code */ | ||
557 | return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n", | ||
558 | (unsigned long)eattr->var, CCI500_PORT_GLOBAL); | ||
559 | } | ||
560 | |||
561 | static int cci500_validate_hw_event(struct cci_pmu *cci_pmu, | ||
562 | unsigned long hw_event) | ||
563 | { | ||
564 | u32 ev_source = CCI500_PMU_EVENT_SOURCE(hw_event); | ||
565 | u32 ev_code = CCI500_PMU_EVENT_CODE(hw_event); | ||
566 | int if_type; | ||
567 | |||
568 | if (hw_event & ~CCI500_PMU_EVENT_MASK) | ||
569 | return -ENOENT; | ||
570 | |||
571 | switch (ev_source) { | ||
572 | case CCI500_PORT_S0: | ||
573 | case CCI500_PORT_S1: | ||
574 | case CCI500_PORT_S2: | ||
575 | case CCI500_PORT_S3: | ||
576 | case CCI500_PORT_S4: | ||
577 | case CCI500_PORT_S5: | ||
578 | case CCI500_PORT_S6: | ||
579 | if_type = CCI_IF_SLAVE; | ||
580 | break; | ||
581 | case CCI500_PORT_M0: | ||
582 | case CCI500_PORT_M1: | ||
583 | case CCI500_PORT_M2: | ||
584 | case CCI500_PORT_M3: | ||
585 | case CCI500_PORT_M4: | ||
586 | case CCI500_PORT_M5: | ||
587 | if_type = CCI_IF_MASTER; | ||
588 | break; | ||
589 | case CCI500_PORT_GLOBAL: | ||
590 | if_type = CCI_IF_GLOBAL; | ||
591 | break; | ||
592 | default: | ||
593 | return -ENOENT; | ||
594 | } | ||
595 | |||
596 | if (ev_code >= cci_pmu->model->event_ranges[if_type].min && | ||
597 | ev_code <= cci_pmu->model->event_ranges[if_type].max) | ||
598 | return hw_event; | ||
599 | |||
600 | return -ENOENT; | ||
601 | } | ||
602 | #endif /* CONFIG_ARM_CCI500_PMU */ | ||
603 | |||
604 | static ssize_t cci_pmu_format_show(struct device *dev, | ||
605 | struct device_attribute *attr, char *buf) | ||
606 | { | ||
607 | struct dev_ext_attribute *eattr = container_of(attr, | ||
608 | struct dev_ext_attribute, attr); | ||
609 | return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var); | ||
610 | } | ||
611 | |||
612 | static ssize_t cci_pmu_event_show(struct device *dev, | ||
613 | struct device_attribute *attr, char *buf) | ||
614 | { | ||
615 | struct dev_ext_attribute *eattr = container_of(attr, | ||
616 | struct dev_ext_attribute, attr); | ||
617 | /* source parameter is mandatory for normal PMU events */ | ||
618 | return snprintf(buf, PAGE_SIZE, "source=?,event=0x%lx\n", | ||
619 | (unsigned long)eattr->var); | ||
620 | } | ||
235 | 621 | ||
236 | static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx) | 622 | static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx) |
237 | { | 623 | { |
238 | return CCI_PMU_CYCLE_CNTR_IDX <= idx && | 624 | return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu); |
239 | idx <= CCI_PMU_CNTR_LAST(cci_pmu); | ||
240 | } | 625 | } |
241 | 626 | ||
242 | static u32 pmu_read_register(int idx, unsigned int offset) | 627 | static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset) |
243 | { | 628 | { |
244 | return readl_relaxed(pmu->base + CCI_PMU_CNTR_BASE(idx) + offset); | 629 | return readl_relaxed(cci_pmu->base + |
630 | CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); | ||
245 | } | 631 | } |
246 | 632 | ||
247 | static void pmu_write_register(u32 value, int idx, unsigned int offset) | 633 | static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value, |
634 | int idx, unsigned int offset) | ||
248 | { | 635 | { |
249 | return writel_relaxed(value, pmu->base + CCI_PMU_CNTR_BASE(idx) + offset); | 636 | return writel_relaxed(value, cci_pmu->base + |
637 | CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); | ||
250 | } | 638 | } |
251 | 639 | ||
252 | static void pmu_disable_counter(int idx) | 640 | static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx) |
253 | { | 641 | { |
254 | pmu_write_register(0, idx, CCI_PMU_CNTR_CTRL); | 642 | pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL); |
255 | } | 643 | } |
256 | 644 | ||
257 | static void pmu_enable_counter(int idx) | 645 | static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx) |
258 | { | 646 | { |
259 | pmu_write_register(1, idx, CCI_PMU_CNTR_CTRL); | 647 | pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL); |
260 | } | 648 | } |
261 | 649 | ||
262 | static void pmu_set_event(int idx, unsigned long event) | 650 | static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event) |
263 | { | 651 | { |
264 | pmu_write_register(event, idx, CCI_PMU_EVT_SEL); | 652 | pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL); |
265 | } | 653 | } |
266 | 654 | ||
655 | /* | ||
656 | * Returns the number of programmable counters actually implemented | ||
657 | * by the cci | ||
658 | */ | ||
267 | static u32 pmu_get_max_counters(void) | 659 | static u32 pmu_get_max_counters(void) |
268 | { | 660 | { |
269 | u32 n_cnts = (readl_relaxed(cci_ctrl_base + CCI_PMCR) & | 661 | return (readl_relaxed(cci_ctrl_base + CCI_PMCR) & |
270 | CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT; | 662 | CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT; |
271 | |||
272 | /* add 1 for cycle counter */ | ||
273 | return n_cnts + 1; | ||
274 | } | 663 | } |
275 | 664 | ||
276 | static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event) | 665 | static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event) |
277 | { | 666 | { |
278 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | 667 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); |
279 | struct hw_perf_event *hw_event = &event->hw; | 668 | unsigned long cci_event = event->hw.config_base; |
280 | unsigned long cci_event = hw_event->config_base; | ||
281 | int idx; | 669 | int idx; |
282 | 670 | ||
283 | if (cci_event == CCI_PMU_CYCLES) { | 671 | if (cci_pmu->model->get_event_idx) |
284 | if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask)) | 672 | return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event); |
285 | return -EAGAIN; | ||
286 | 673 | ||
287 | return CCI_PMU_CYCLE_CNTR_IDX; | 674 | /* Generic code to find an unused idx from the mask */ |
288 | } | 675 | for(idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) |
289 | |||
290 | for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx) | ||
291 | if (!test_and_set_bit(idx, hw->used_mask)) | 676 | if (!test_and_set_bit(idx, hw->used_mask)) |
292 | return idx; | 677 | return idx; |
293 | 678 | ||
@@ -297,18 +682,13 @@ static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *ev | |||
297 | 682 | ||
298 | static int pmu_map_event(struct perf_event *event) | 683 | static int pmu_map_event(struct perf_event *event) |
299 | { | 684 | { |
300 | int mapping; | 685 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); |
301 | unsigned long config = event->attr.config; | ||
302 | 686 | ||
303 | if (event->attr.type < PERF_TYPE_MAX) | 687 | if (event->attr.type < PERF_TYPE_MAX || |
688 | !cci_pmu->model->validate_hw_event) | ||
304 | return -ENOENT; | 689 | return -ENOENT; |
305 | 690 | ||
306 | if (config == CCI_PMU_CYCLES) | 691 | return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config); |
307 | mapping = config; | ||
308 | else | ||
309 | mapping = pmu_validate_hw_event(config); | ||
310 | |||
311 | return mapping; | ||
312 | } | 692 | } |
313 | 693 | ||
314 | static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler) | 694 | static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler) |
@@ -319,7 +699,7 @@ static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler) | |||
319 | if (unlikely(!pmu_device)) | 699 | if (unlikely(!pmu_device)) |
320 | return -ENODEV; | 700 | return -ENODEV; |
321 | 701 | ||
322 | if (pmu->nr_irqs < 1) { | 702 | if (cci_pmu->nr_irqs < 1) { |
323 | dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n"); | 703 | dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n"); |
324 | return -ENODEV; | 704 | return -ENODEV; |
325 | } | 705 | } |
@@ -331,16 +711,16 @@ static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler) | |||
331 | * | 711 | * |
332 | * This should allow handling of non-unique interrupt for the counters. | 712 | * This should allow handling of non-unique interrupt for the counters. |
333 | */ | 713 | */ |
334 | for (i = 0; i < pmu->nr_irqs; i++) { | 714 | for (i = 0; i < cci_pmu->nr_irqs; i++) { |
335 | int err = request_irq(pmu->irqs[i], handler, IRQF_SHARED, | 715 | int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED, |
336 | "arm-cci-pmu", cci_pmu); | 716 | "arm-cci-pmu", cci_pmu); |
337 | if (err) { | 717 | if (err) { |
338 | dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n", | 718 | dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n", |
339 | pmu->irqs[i]); | 719 | cci_pmu->irqs[i]); |
340 | return err; | 720 | return err; |
341 | } | 721 | } |
342 | 722 | ||
343 | set_bit(i, &pmu->active_irqs); | 723 | set_bit(i, &cci_pmu->active_irqs); |
344 | } | 724 | } |
345 | 725 | ||
346 | return 0; | 726 | return 0; |
@@ -350,11 +730,11 @@ static void pmu_free_irq(struct cci_pmu *cci_pmu) | |||
350 | { | 730 | { |
351 | int i; | 731 | int i; |
352 | 732 | ||
353 | for (i = 0; i < pmu->nr_irqs; i++) { | 733 | for (i = 0; i < cci_pmu->nr_irqs; i++) { |
354 | if (!test_and_clear_bit(i, &pmu->active_irqs)) | 734 | if (!test_and_clear_bit(i, &cci_pmu->active_irqs)) |
355 | continue; | 735 | continue; |
356 | 736 | ||
357 | free_irq(pmu->irqs[i], cci_pmu); | 737 | free_irq(cci_pmu->irqs[i], cci_pmu); |
358 | } | 738 | } |
359 | } | 739 | } |
360 | 740 | ||
@@ -369,7 +749,7 @@ static u32 pmu_read_counter(struct perf_event *event) | |||
369 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); | 749 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); |
370 | return 0; | 750 | return 0; |
371 | } | 751 | } |
372 | value = pmu_read_register(idx, CCI_PMU_CNTR); | 752 | value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR); |
373 | 753 | ||
374 | return value; | 754 | return value; |
375 | } | 755 | } |
@@ -383,7 +763,7 @@ static void pmu_write_counter(struct perf_event *event, u32 value) | |||
383 | if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) | 763 | if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) |
384 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); | 764 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); |
385 | else | 765 | else |
386 | pmu_write_register(value, idx, CCI_PMU_CNTR); | 766 | pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR); |
387 | } | 767 | } |
388 | 768 | ||
389 | static u64 pmu_event_update(struct perf_event *event) | 769 | static u64 pmu_event_update(struct perf_event *event) |
@@ -427,7 +807,7 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev) | |||
427 | { | 807 | { |
428 | unsigned long flags; | 808 | unsigned long flags; |
429 | struct cci_pmu *cci_pmu = dev; | 809 | struct cci_pmu *cci_pmu = dev; |
430 | struct cci_pmu_hw_events *events = &pmu->hw_events; | 810 | struct cci_pmu_hw_events *events = &cci_pmu->hw_events; |
431 | int idx, handled = IRQ_NONE; | 811 | int idx, handled = IRQ_NONE; |
432 | 812 | ||
433 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | 813 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
@@ -436,7 +816,7 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev) | |||
436 | * This should work regardless of whether we have per-counter overflow | 816 | * This should work regardless of whether we have per-counter overflow |
437 | * interrupt or a combined overflow interrupt. | 817 | * interrupt or a combined overflow interrupt. |
438 | */ | 818 | */ |
439 | for (idx = CCI_PMU_CYCLE_CNTR_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) { | 819 | for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) { |
440 | struct perf_event *event = events->events[idx]; | 820 | struct perf_event *event = events->events[idx]; |
441 | struct hw_perf_event *hw_counter; | 821 | struct hw_perf_event *hw_counter; |
442 | 822 | ||
@@ -446,11 +826,12 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev) | |||
446 | hw_counter = &event->hw; | 826 | hw_counter = &event->hw; |
447 | 827 | ||
448 | /* Did this counter overflow? */ | 828 | /* Did this counter overflow? */ |
449 | if (!(pmu_read_register(idx, CCI_PMU_OVRFLW) & | 829 | if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) & |
450 | CCI_PMU_OVRFLW_FLAG)) | 830 | CCI_PMU_OVRFLW_FLAG)) |
451 | continue; | 831 | continue; |
452 | 832 | ||
453 | pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW); | 833 | pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx, |
834 | CCI_PMU_OVRFLW); | ||
454 | 835 | ||
455 | pmu_event_update(event); | 836 | pmu_event_update(event); |
456 | pmu_event_set_period(event); | 837 | pmu_event_set_period(event); |
@@ -492,7 +873,7 @@ static void cci_pmu_enable(struct pmu *pmu) | |||
492 | { | 873 | { |
493 | struct cci_pmu *cci_pmu = to_cci_pmu(pmu); | 874 | struct cci_pmu *cci_pmu = to_cci_pmu(pmu); |
494 | struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; | 875 | struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; |
495 | int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_events); | 876 | int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs); |
496 | unsigned long flags; | 877 | unsigned long flags; |
497 | u32 val; | 878 | u32 val; |
498 | 879 | ||
@@ -523,6 +904,16 @@ static void cci_pmu_disable(struct pmu *pmu) | |||
523 | raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); | 904 | raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); |
524 | } | 905 | } |
525 | 906 | ||
907 | /* | ||
908 | * Check if the idx represents a non-programmable counter. | ||
909 | * All the fixed event counters are mapped before the programmable | ||
910 | * counters. | ||
911 | */ | ||
912 | static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx) | ||
913 | { | ||
914 | return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs); | ||
915 | } | ||
916 | |||
526 | static void cci_pmu_start(struct perf_event *event, int pmu_flags) | 917 | static void cci_pmu_start(struct perf_event *event, int pmu_flags) |
527 | { | 918 | { |
528 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | 919 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); |
@@ -547,12 +938,12 @@ static void cci_pmu_start(struct perf_event *event, int pmu_flags) | |||
547 | 938 | ||
548 | raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); | 939 | raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); |
549 | 940 | ||
550 | /* Configure the event to count, unless you are counting cycles */ | 941 | /* Configure the counter unless you are counting a fixed event */ |
551 | if (idx != CCI_PMU_CYCLE_CNTR_IDX) | 942 | if (!pmu_fixed_hw_idx(cci_pmu, idx)) |
552 | pmu_set_event(idx, hwc->config_base); | 943 | pmu_set_event(cci_pmu, idx, hwc->config_base); |
553 | 944 | ||
554 | pmu_event_set_period(event); | 945 | pmu_event_set_period(event); |
555 | pmu_enable_counter(idx); | 946 | pmu_enable_counter(cci_pmu, idx); |
556 | 947 | ||
557 | raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); | 948 | raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); |
558 | } | 949 | } |
@@ -575,7 +966,7 @@ static void cci_pmu_stop(struct perf_event *event, int pmu_flags) | |||
575 | * We always reprogram the counter, so ignore PERF_EF_UPDATE. See | 966 | * We always reprogram the counter, so ignore PERF_EF_UPDATE. See |
576 | * cci_pmu_start() | 967 | * cci_pmu_start() |
577 | */ | 968 | */ |
578 | pmu_disable_counter(idx); | 969 | pmu_disable_counter(cci_pmu, idx); |
579 | pmu_event_update(event); | 970 | pmu_event_update(event); |
580 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | 971 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; |
581 | } | 972 | } |
@@ -655,13 +1046,16 @@ static int | |||
655 | validate_group(struct perf_event *event) | 1046 | validate_group(struct perf_event *event) |
656 | { | 1047 | { |
657 | struct perf_event *sibling, *leader = event->group_leader; | 1048 | struct perf_event *sibling, *leader = event->group_leader; |
1049 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | ||
1050 | unsigned long mask[BITS_TO_LONGS(cci_pmu->num_cntrs)]; | ||
658 | struct cci_pmu_hw_events fake_pmu = { | 1051 | struct cci_pmu_hw_events fake_pmu = { |
659 | /* | 1052 | /* |
660 | * Initialise the fake PMU. We only need to populate the | 1053 | * Initialise the fake PMU. We only need to populate the |
661 | * used_mask for the purposes of validation. | 1054 | * used_mask for the purposes of validation. |
662 | */ | 1055 | */ |
663 | .used_mask = { 0 }, | 1056 | .used_mask = mask, |
664 | }; | 1057 | }; |
1058 | memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long)); | ||
665 | 1059 | ||
666 | if (!validate_event(event->pmu, &fake_pmu, leader)) | 1060 | if (!validate_event(event->pmu, &fake_pmu, leader)) |
667 | return -EINVAL; | 1061 | return -EINVAL; |
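Since the number of counters now varies with the CCI model, validate_group() can no longer use a fixed-size used_mask; it sizes a local bitmap from cci_pmu->num_cntrs and zeroes it before simulating counter allocation for the whole group. The idea in plain C (the helper below is a stand-in for the driver's allocation callback, not its real interface):

    #include <limits.h>
    #include <stdio.h>
    #include <string.h>

    #define BITS_PER_LONG   (sizeof(unsigned long) * CHAR_BIT)
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* Claim the first free counter out of 'num', or return -1. */
    static int claim_counter(unsigned long *mask, unsigned int num)
    {
        for (unsigned int i = 0; i < num; i++) {
            unsigned long bit = 1UL << (i % BITS_PER_LONG);

            if (!(mask[i / BITS_PER_LONG] & bit)) {
                mask[i / BITS_PER_LONG] |= bit;
                return (int)i;
            }
        }
        return -1;                      /* group would not fit */
    }

    int main(void)
    {
        unsigned int num_cntrs = 5;     /* 1 fixed + 4 programmable */
        unsigned long mask[BITS_TO_LONGS(5)];

        memset(mask, 0, sizeof(mask));  /* the fake PMU starts empty */
        for (int i = 0; i < 6; i++)     /* the sixth event cannot be placed */
            printf("event %d -> counter %d\n", i,
                   claim_counter(mask, num_cntrs));
        return 0;
    }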
@@ -779,20 +1173,27 @@ static int cci_pmu_event_init(struct perf_event *event) | |||
779 | return err; | 1173 | return err; |
780 | } | 1174 | } |
781 | 1175 | ||
782 | static ssize_t pmu_attr_cpumask_show(struct device *dev, | 1176 | static ssize_t pmu_cpumask_attr_show(struct device *dev, |
783 | struct device_attribute *attr, char *buf) | 1177 | struct device_attribute *attr, char *buf) |
784 | { | 1178 | { |
1179 | struct dev_ext_attribute *eattr = container_of(attr, | ||
1180 | struct dev_ext_attribute, attr); | ||
1181 | struct cci_pmu *cci_pmu = eattr->var; | ||
1182 | |||
785 | int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl", | 1183 | int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl", |
786 | cpumask_pr_args(&pmu->cpus)); | 1184 | cpumask_pr_args(&cci_pmu->cpus)); |
787 | buf[n++] = '\n'; | 1185 | buf[n++] = '\n'; |
788 | buf[n] = '\0'; | 1186 | buf[n] = '\0'; |
789 | return n; | 1187 | return n; |
790 | } | 1188 | } |
791 | 1189 | ||
792 | static DEVICE_ATTR(cpumask, S_IRUGO, pmu_attr_cpumask_show, NULL); | 1190 | static struct dev_ext_attribute pmu_cpumask_attr = { |
1191 | __ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL), | ||
1192 | NULL, /* Populated in cci_pmu_init */ | ||
1193 | }; | ||
793 | 1194 | ||
794 | static struct attribute *pmu_attrs[] = { | 1195 | static struct attribute *pmu_attrs[] = { |
795 | &dev_attr_cpumask.attr, | 1196 | &pmu_cpumask_attr.attr.attr, |
796 | NULL, | 1197 | NULL, |
797 | }; | 1198 | }; |
798 | 1199 | ||
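Replacing DEVICE_ATTR with a struct dev_ext_attribute lets the show routine find its own cci_pmu instance instead of a file-scope pointer: the extra ->var field is filled in during init, and container_of() walks back from the embedded device_attribute that sysfs hands to the callback. A self-contained sketch of that back-pointer pattern (the struct names below are stand-ins, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct device_attribute { const char *name; };

    /* A device_attribute plus a private back-pointer, like dev_ext_attribute. */
    struct ext_attribute {
        struct device_attribute attr;
        void *var;                      /* points at the owning PMU instance */
    };

    struct my_pmu { const char *name; };

    static void show(struct device_attribute *attr)
    {
        struct ext_attribute *eattr =
            container_of(attr, struct ext_attribute, attr);
        struct my_pmu *pmu = eattr->var;

        printf("attribute '%s' belongs to PMU '%s'\n", attr->name, pmu->name);
    }

    int main(void)
    {
        struct my_pmu pmu = { .name = "CCI_400" };
        struct ext_attribute cpumask_attr = { .attr = { "cpumask" }, .var = &pmu };

        show(&cpumask_attr.attr);       /* only the inner attribute is passed */
        return 0;
    }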
@@ -800,14 +1201,78 @@ static struct attribute_group pmu_attr_group = { | |||
800 | .attrs = pmu_attrs, | 1201 | .attrs = pmu_attrs, |
801 | }; | 1202 | }; |
802 | 1203 | ||
1204 | static struct attribute_group pmu_format_attr_group = { | ||
1205 | .name = "format", | ||
1206 | .attrs = NULL, /* Filled in cci_pmu_init_attrs */ | ||
1207 | }; | ||
1208 | |||
1209 | static struct attribute_group pmu_event_attr_group = { | ||
1210 | .name = "events", | ||
1211 | .attrs = NULL, /* Filled in cci_pmu_init_attrs */ | ||
1212 | }; | ||
1213 | |||
803 | static const struct attribute_group *pmu_attr_groups[] = { | 1214 | static const struct attribute_group *pmu_attr_groups[] = { |
804 | &pmu_attr_group, | 1215 | &pmu_attr_group, |
1216 | &pmu_format_attr_group, | ||
1217 | &pmu_event_attr_group, | ||
805 | NULL | 1218 | NULL |
806 | }; | 1219 | }; |
807 | 1220 | ||
1221 | static struct attribute **alloc_attrs(struct platform_device *pdev, | ||
1222 | int n, struct dev_ext_attribute *source) | ||
1223 | { | ||
1224 | int i; | ||
1225 | struct attribute **attrs; | ||
1226 | |||
1227 | /* Alloc n + 1 (for terminating NULL) */ | ||
1228 | attrs = devm_kcalloc(&pdev->dev, n + 1, sizeof(struct attribute *), | ||
1229 | GFP_KERNEL); | ||
1230 | if (!attrs) | ||
1231 | return attrs; | ||
1232 | for(i = 0; i < n; i++) | ||
1233 | attrs[i] = &source[i].attr.attr; | ||
1234 | return attrs; | ||
1235 | } | ||
1236 | |||
1237 | static int cci_pmu_init_attrs(struct cci_pmu *cci_pmu, struct platform_device *pdev) | ||
1238 | { | ||
1239 | const struct cci_pmu_model *model = cci_pmu->model; | ||
1240 | struct attribute **attrs; | ||
1241 | |||
1242 | /* | ||
1243 | * All allocations below are managed, hence doesn't need to be | ||
1244 | * free'd explicitly in case of an error. | ||
1245 | */ | ||
1246 | |||
1247 | if (model->nevent_attrs) { | ||
1248 | attrs = alloc_attrs(pdev, model->nevent_attrs, | ||
1249 | model->event_attrs); | ||
1250 | if (!attrs) | ||
1251 | return -ENOMEM; | ||
1252 | pmu_event_attr_group.attrs = attrs; | ||
1253 | } | ||
1254 | if (model->nformat_attrs) { | ||
1255 | attrs = alloc_attrs(pdev, model->nformat_attrs, | ||
1256 | model->format_attrs); | ||
1257 | if (!attrs) | ||
1258 | return -ENOMEM; | ||
1259 | pmu_format_attr_group.attrs = attrs; | ||
1260 | } | ||
1261 | pmu_cpumask_attr.var = cci_pmu; | ||
1262 | |||
1263 | return 0; | ||
1264 | } | ||
1265 | |||
808 | static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) | 1266 | static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) |
809 | { | 1267 | { |
810 | char *name = cci_pmu->model->name; | 1268 | char *name = cci_pmu->model->name; |
1269 | u32 num_cntrs; | ||
1270 | int rc; | ||
1271 | |||
1272 | rc = cci_pmu_init_attrs(cci_pmu, pdev); | ||
1273 | if (rc) | ||
1274 | return rc; | ||
1275 | |||
811 | cci_pmu->pmu = (struct pmu) { | 1276 | cci_pmu->pmu = (struct pmu) { |
812 | .name = cci_pmu->model->name, | 1277 | .name = cci_pmu->model->name, |
813 | .task_ctx_nr = perf_invalid_context, | 1278 | .task_ctx_nr = perf_invalid_context, |
@@ -823,7 +1288,15 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) | |||
823 | }; | 1288 | }; |
824 | 1289 | ||
825 | cci_pmu->plat_device = pdev; | 1290 | cci_pmu->plat_device = pdev; |
826 | cci_pmu->num_events = pmu_get_max_counters(); | 1291 | num_cntrs = pmu_get_max_counters(); |
1292 | if (num_cntrs > cci_pmu->model->num_hw_cntrs) { | ||
1293 | dev_warn(&pdev->dev, | ||
1294 | "PMU implements more counters(%d) than supported by" | ||
1295 | " the model(%d), truncated.", | ||
1296 | num_cntrs, cci_pmu->model->num_hw_cntrs); | ||
1297 | num_cntrs = cci_pmu->model->num_hw_cntrs; | ||
1298 | } | ||
1299 | cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs; | ||
827 | 1300 | ||
828 | return perf_pmu_register(&cci_pmu->pmu, name, -1); | 1301 | return perf_pmu_register(&cci_pmu->pmu, name, -1); |
829 | } | 1302 | } |
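sysfs attribute groups expect a NULL-terminated array of struct attribute pointers, which is why alloc_attrs() allocates n + 1 slots and points each entry at the model's static descriptors; cci_pmu_init() then clamps the counter count reported by the hardware to what the model supports before adding the fixed counters. A plain-C sketch of the array-building step (names and event strings are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct attribute { const char *name; };
    struct ext_attribute { struct attribute attr; void *var; };

    /* Build a NULL-terminated pointer array over 'n' embedded attributes. */
    static struct attribute **alloc_attr_ptrs(int n, struct ext_attribute *src)
    {
        struct attribute **attrs = calloc(n + 1, sizeof(*attrs));

        if (!attrs)
            return NULL;
        for (int i = 0; i < n; i++)
            attrs[i] = &src[i].attr;
        return attrs;                   /* attrs[n] stays NULL: the terminator */
    }

    int main(void)
    {
        struct ext_attribute events[] = {
            { { "cycles" } }, { { "si_rrq_hs_any" } },
        };
        struct attribute **attrs = alloc_attr_ptrs(2, events);

        for (struct attribute **a = attrs; *a; a++)   /* walk until NULL */
            printf("%s\n", (*a)->name);
        free(attrs);
        return 0;
    }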
@@ -831,12 +1304,14 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) | |||
831 | static int cci_pmu_cpu_notifier(struct notifier_block *self, | 1304 | static int cci_pmu_cpu_notifier(struct notifier_block *self, |
832 | unsigned long action, void *hcpu) | 1305 | unsigned long action, void *hcpu) |
833 | { | 1306 | { |
1307 | struct cci_pmu *cci_pmu = container_of(self, | ||
1308 | struct cci_pmu, cpu_nb); | ||
834 | unsigned int cpu = (long)hcpu; | 1309 | unsigned int cpu = (long)hcpu; |
835 | unsigned int target; | 1310 | unsigned int target; |
836 | 1311 | ||
837 | switch (action & ~CPU_TASKS_FROZEN) { | 1312 | switch (action & ~CPU_TASKS_FROZEN) { |
838 | case CPU_DOWN_PREPARE: | 1313 | case CPU_DOWN_PREPARE: |
839 | if (!cpumask_test_and_clear_cpu(cpu, &pmu->cpus)) | 1314 | if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus)) |
840 | break; | 1315 | break; |
841 | target = cpumask_any_but(cpu_online_mask, cpu); | 1316 | target = cpumask_any_but(cpu_online_mask, cpu); |
842 | if (target < 0) // UP, last CPU | 1317 | if (target < 0) // UP, last CPU |
@@ -845,7 +1320,7 @@ static int cci_pmu_cpu_notifier(struct notifier_block *self, | |||
845 | * TODO: migrate context once core races on event->ctx have | 1320 | * TODO: migrate context once core races on event->ctx have |
846 | * been fixed. | 1321 | * been fixed. |
847 | */ | 1322 | */ |
848 | cpumask_set_cpu(target, &pmu->cpus); | 1323 | cpumask_set_cpu(target, &cci_pmu->cpus); |
849 | default: | 1324 | default: |
850 | break; | 1325 | break; |
851 | } | 1326 | } |
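The notifier keeps all events on one owner CPU: on CPU_DOWN_PREPARE it only acts if the dying CPU is the one recorded in cci_pmu->cpus, and then picks any other online CPU with cpumask_any_but(). The decision logic on its own, modelled with a plain array instead of cpumasks (illustrative only):

    #include <stdio.h>

    #define NR_CPUS 4

    /* Pick any online CPU other than 'dying', or -1 if none is left. */
    static int pick_target(const int online[NR_CPUS], int dying)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            if (cpu != dying && online[cpu])
                return cpu;
        return -1;                      /* uniprocessor: nothing to migrate to */
    }

    int main(void)
    {
        int online[NR_CPUS] = { 1, 1, 1, 1 };
        int owner = 0;                  /* CPU currently named in "cpumask" */
        int dying = 0;                  /* CPU_DOWN_PREPARE for this CPU */

        if (owner == dying) {           /* only act if we lose the owner CPU */
            int target = pick_target(online, dying);

            if (target >= 0)
                owner = target;         /* events would be migrated here */
        }
        printf("new owner CPU: %d\n", owner);
        return 0;
    }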
@@ -853,57 +1328,103 @@ static int cci_pmu_cpu_notifier(struct notifier_block *self, | |||
853 | return NOTIFY_OK; | 1328 | return NOTIFY_OK; |
854 | } | 1329 | } |
855 | 1330 | ||
856 | static struct notifier_block cci_pmu_cpu_nb = { | ||
857 | .notifier_call = cci_pmu_cpu_notifier, | ||
858 | /* | ||
859 | * to migrate uncore events, our notifier should be executed | ||
860 | * before perf core's notifier. | ||
861 | */ | ||
862 | .priority = CPU_PRI_PERF + 1, | ||
863 | }; | ||
864 | |||
865 | static struct cci_pmu_model cci_pmu_models[] = { | 1331 | static struct cci_pmu_model cci_pmu_models[] = { |
866 | [CCI_REV_R0] = { | 1332 | #ifdef CONFIG_ARM_CCI400_PMU |
1333 | [CCI400_R0] = { | ||
867 | .name = "CCI_400", | 1334 | .name = "CCI_400", |
1335 | .fixed_hw_cntrs = 1, /* Cycle counter */ | ||
1336 | .num_hw_cntrs = 4, | ||
1337 | .cntr_size = SZ_4K, | ||
1338 | .format_attrs = cci400_pmu_format_attrs, | ||
1339 | .nformat_attrs = ARRAY_SIZE(cci400_pmu_format_attrs), | ||
1340 | .event_attrs = cci400_r0_pmu_event_attrs, | ||
1341 | .nevent_attrs = ARRAY_SIZE(cci400_r0_pmu_event_attrs), | ||
868 | .event_ranges = { | 1342 | .event_ranges = { |
869 | [CCI_IF_SLAVE] = { | 1343 | [CCI_IF_SLAVE] = { |
870 | CCI_REV_R0_SLAVE_PORT_MIN_EV, | 1344 | CCI400_R0_SLAVE_PORT_MIN_EV, |
871 | CCI_REV_R0_SLAVE_PORT_MAX_EV, | 1345 | CCI400_R0_SLAVE_PORT_MAX_EV, |
872 | }, | 1346 | }, |
873 | [CCI_IF_MASTER] = { | 1347 | [CCI_IF_MASTER] = { |
874 | CCI_REV_R0_MASTER_PORT_MIN_EV, | 1348 | CCI400_R0_MASTER_PORT_MIN_EV, |
875 | CCI_REV_R0_MASTER_PORT_MAX_EV, | 1349 | CCI400_R0_MASTER_PORT_MAX_EV, |
876 | }, | 1350 | }, |
877 | }, | 1351 | }, |
1352 | .validate_hw_event = cci400_validate_hw_event, | ||
1353 | .get_event_idx = cci400_get_event_idx, | ||
878 | }, | 1354 | }, |
879 | [CCI_REV_R1] = { | 1355 | [CCI400_R1] = { |
880 | .name = "CCI_400_r1", | 1356 | .name = "CCI_400_r1", |
1357 | .fixed_hw_cntrs = 1, /* Cycle counter */ | ||
1358 | .num_hw_cntrs = 4, | ||
1359 | .cntr_size = SZ_4K, | ||
1360 | .format_attrs = cci400_pmu_format_attrs, | ||
1361 | .nformat_attrs = ARRAY_SIZE(cci400_pmu_format_attrs), | ||
1362 | .event_attrs = cci400_r1_pmu_event_attrs, | ||
1363 | .nevent_attrs = ARRAY_SIZE(cci400_r1_pmu_event_attrs), | ||
881 | .event_ranges = { | 1364 | .event_ranges = { |
882 | [CCI_IF_SLAVE] = { | 1365 | [CCI_IF_SLAVE] = { |
883 | CCI_REV_R1_SLAVE_PORT_MIN_EV, | 1366 | CCI400_R1_SLAVE_PORT_MIN_EV, |
884 | CCI_REV_R1_SLAVE_PORT_MAX_EV, | 1367 | CCI400_R1_SLAVE_PORT_MAX_EV, |
885 | }, | 1368 | }, |
886 | [CCI_IF_MASTER] = { | 1369 | [CCI_IF_MASTER] = { |
887 | CCI_REV_R1_MASTER_PORT_MIN_EV, | 1370 | CCI400_R1_MASTER_PORT_MIN_EV, |
888 | CCI_REV_R1_MASTER_PORT_MAX_EV, | 1371 | CCI400_R1_MASTER_PORT_MAX_EV, |
889 | }, | 1372 | }, |
890 | }, | 1373 | }, |
1374 | .validate_hw_event = cci400_validate_hw_event, | ||
1375 | .get_event_idx = cci400_get_event_idx, | ||
891 | }, | 1376 | }, |
1377 | #endif | ||
1378 | #ifdef CONFIG_ARM_CCI500_PMU | ||
1379 | [CCI500_R0] = { | ||
1380 | .name = "CCI_500", | ||
1381 | .fixed_hw_cntrs = 0, | ||
1382 | .num_hw_cntrs = 8, | ||
1383 | .cntr_size = SZ_64K, | ||
1384 | .format_attrs = cci500_pmu_format_attrs, | ||
1385 | .nformat_attrs = ARRAY_SIZE(cci500_pmu_format_attrs), | ||
1386 | .event_attrs = cci500_pmu_event_attrs, | ||
1387 | .nevent_attrs = ARRAY_SIZE(cci500_pmu_event_attrs), | ||
1388 | .event_ranges = { | ||
1389 | [CCI_IF_SLAVE] = { | ||
1390 | CCI500_SLAVE_PORT_MIN_EV, | ||
1391 | CCI500_SLAVE_PORT_MAX_EV, | ||
1392 | }, | ||
1393 | [CCI_IF_MASTER] = { | ||
1394 | CCI500_MASTER_PORT_MIN_EV, | ||
1395 | CCI500_MASTER_PORT_MAX_EV, | ||
1396 | }, | ||
1397 | [CCI_IF_GLOBAL] = { | ||
1398 | CCI500_GLOBAL_PORT_MIN_EV, | ||
1399 | CCI500_GLOBAL_PORT_MAX_EV, | ||
1400 | }, | ||
1401 | }, | ||
1402 | .validate_hw_event = cci500_validate_hw_event, | ||
1403 | }, | ||
1404 | #endif | ||
892 | }; | 1405 | }; |
893 | 1406 | ||
894 | static const struct of_device_id arm_cci_pmu_matches[] = { | 1407 | static const struct of_device_id arm_cci_pmu_matches[] = { |
1408 | #ifdef CONFIG_ARM_CCI400_PMU | ||
895 | { | 1409 | { |
896 | .compatible = "arm,cci-400-pmu", | 1410 | .compatible = "arm,cci-400-pmu", |
897 | .data = NULL, | 1411 | .data = NULL, |
898 | }, | 1412 | }, |
899 | { | 1413 | { |
900 | .compatible = "arm,cci-400-pmu,r0", | 1414 | .compatible = "arm,cci-400-pmu,r0", |
901 | .data = &cci_pmu_models[CCI_REV_R0], | 1415 | .data = &cci_pmu_models[CCI400_R0], |
902 | }, | 1416 | }, |
903 | { | 1417 | { |
904 | .compatible = "arm,cci-400-pmu,r1", | 1418 | .compatible = "arm,cci-400-pmu,r1", |
905 | .data = &cci_pmu_models[CCI_REV_R1], | 1419 | .data = &cci_pmu_models[CCI400_R1], |
1420 | }, | ||
1421 | #endif | ||
1422 | #ifdef CONFIG_ARM_CCI500_PMU | ||
1423 | { | ||
1424 | .compatible = "arm,cci-500-pmu,r0", | ||
1425 | .data = &cci_pmu_models[CCI500_R0], | ||
906 | }, | 1426 | }, |
1427 | #endif | ||
907 | {}, | 1428 | {}, |
908 | }; | 1429 | }; |
909 | 1430 | ||
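Each compatible string's .data field points into cci_pmu_models[], so a single table lookup during probe yields the per-revision limits: how many fixed and programmable counters exist, the per-counter register stride, and the valid event ranges. A reduced sketch of that lookup (standalone C, with the structures trimmed to a few fields):

    #include <stdio.h>
    #include <string.h>

    struct pmu_model {
        const char *name;
        int fixed_hw_cntrs;
        int num_hw_cntrs;
        unsigned int cntr_size;
    };

    static const struct pmu_model models[] = {
        { "CCI_400",    1, 4, 0x1000  },    /* r0: cycle counter + 4 */
        { "CCI_400_r1", 1, 4, 0x1000  },
        { "CCI_500",    0, 8, 0x10000 },    /* no fixed counter, 64K stride */
    };

    struct of_match { const char *compatible; const struct pmu_model *data; };

    static const struct of_match matches[] = {
        { "arm,cci-400-pmu,r0", &models[0] },
        { "arm,cci-400-pmu,r1", &models[1] },
        { "arm,cci-500-pmu,r0", &models[2] },
        { NULL, NULL },
    };

    static const struct pmu_model *lookup(const char *compat)
    {
        for (const struct of_match *m = matches; m->compatible; m++)
            if (!strcmp(m->compatible, compat))
                return m->data;
        return NULL;                    /* unsupported CCI PMU version */
    }

    int main(void)
    {
        const struct pmu_model *m = lookup("arm,cci-500-pmu,r0");

        if (m)
            printf("%s: %d+%d counters, stride 0x%x\n",
                   m->name, m->fixed_hw_cntrs, m->num_hw_cntrs, m->cntr_size);
        return 0;
    }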
@@ -932,68 +1453,114 @@ static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs) | |||
932 | return false; | 1453 | return false; |
933 | } | 1454 | } |
934 | 1455 | ||
935 | static int cci_pmu_probe(struct platform_device *pdev) | 1456 | static struct cci_pmu *cci_pmu_alloc(struct platform_device *pdev) |
936 | { | 1457 | { |
937 | struct resource *res; | 1458 | struct cci_pmu *cci_pmu; |
938 | int i, ret, irq; | ||
939 | const struct cci_pmu_model *model; | 1459 | const struct cci_pmu_model *model; |
940 | 1460 | ||
1461 | /* | ||
1462 | * All allocations are devm_* hence we don't have to free | ||
1463 | * them explicitly on an error, as it would end up in driver | ||
1464 | * detach. | ||
1465 | */ | ||
941 | model = get_cci_model(pdev); | 1466 | model = get_cci_model(pdev); |
942 | if (!model) { | 1467 | if (!model) { |
943 | dev_warn(&pdev->dev, "CCI PMU version not supported\n"); | 1468 | dev_warn(&pdev->dev, "CCI PMU version not supported\n"); |
944 | return -ENODEV; | 1469 | return ERR_PTR(-ENODEV); |
945 | } | 1470 | } |
946 | 1471 | ||
947 | pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL); | 1472 | cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*cci_pmu), GFP_KERNEL); |
948 | if (!pmu) | 1473 | if (!cci_pmu) |
949 | return -ENOMEM; | 1474 | return ERR_PTR(-ENOMEM); |
1475 | |||
1476 | cci_pmu->model = model; | ||
1477 | cci_pmu->irqs = devm_kcalloc(&pdev->dev, CCI_PMU_MAX_HW_CNTRS(model), | ||
1478 | sizeof(*cci_pmu->irqs), GFP_KERNEL); | ||
1479 | if (!cci_pmu->irqs) | ||
1480 | return ERR_PTR(-ENOMEM); | ||
1481 | cci_pmu->hw_events.events = devm_kcalloc(&pdev->dev, | ||
1482 | CCI_PMU_MAX_HW_CNTRS(model), | ||
1483 | sizeof(*cci_pmu->hw_events.events), | ||
1484 | GFP_KERNEL); | ||
1485 | if (!cci_pmu->hw_events.events) | ||
1486 | return ERR_PTR(-ENOMEM); | ||
1487 | cci_pmu->hw_events.used_mask = devm_kcalloc(&pdev->dev, | ||
1488 | BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)), | ||
1489 | sizeof(*cci_pmu->hw_events.used_mask), | ||
1490 | GFP_KERNEL); | ||
1491 | if (!cci_pmu->hw_events.used_mask) | ||
1492 | return ERR_PTR(-ENOMEM); | ||
1493 | |||
1494 | return cci_pmu; | ||
1495 | } | ||
1496 | |||
1497 | |||
1498 | static int cci_pmu_probe(struct platform_device *pdev) | ||
1499 | { | ||
1500 | struct resource *res; | ||
1501 | struct cci_pmu *cci_pmu; | ||
1502 | int i, ret, irq; | ||
1503 | |||
1504 | cci_pmu = cci_pmu_alloc(pdev); | ||
1505 | if (IS_ERR(cci_pmu)) | ||
1506 | return PTR_ERR(cci_pmu); | ||
950 | 1507 | ||
951 | pmu->model = model; | ||
952 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1508 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
953 | pmu->base = devm_ioremap_resource(&pdev->dev, res); | 1509 | cci_pmu->base = devm_ioremap_resource(&pdev->dev, res); |
954 | if (IS_ERR(pmu->base)) | 1510 | if (IS_ERR(cci_pmu->base)) |
955 | return -ENOMEM; | 1511 | return -ENOMEM; |
956 | 1512 | ||
957 | /* | 1513 | /* |
958 | * CCI PMU has 5 overflow signals - one per counter; but some may be tied | 1514 | * CCI PMU has one overflow interrupt per counter; but some may be tied |
959 | * together to a common interrupt. | 1515 | * together to a common interrupt. |
960 | */ | 1516 | */ |
961 | pmu->nr_irqs = 0; | 1517 | cci_pmu->nr_irqs = 0; |
962 | for (i = 0; i < CCI_PMU_MAX_HW_EVENTS; i++) { | 1518 | for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) { |
963 | irq = platform_get_irq(pdev, i); | 1519 | irq = platform_get_irq(pdev, i); |
964 | if (irq < 0) | 1520 | if (irq < 0) |
965 | break; | 1521 | break; |
966 | 1522 | ||
967 | if (is_duplicate_irq(irq, pmu->irqs, pmu->nr_irqs)) | 1523 | if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs)) |
968 | continue; | 1524 | continue; |
969 | 1525 | ||
970 | pmu->irqs[pmu->nr_irqs++] = irq; | 1526 | cci_pmu->irqs[cci_pmu->nr_irqs++] = irq; |
971 | } | 1527 | } |
972 | 1528 | ||
973 | /* | 1529 | /* |
974 | * Ensure that the device tree has as many interrupts as the number | 1530 | * Ensure that the device tree has as many interrupts as the number |
975 | * of counters. | 1531 | * of counters. |
976 | */ | 1532 | */ |
977 | if (i < CCI_PMU_MAX_HW_EVENTS) { | 1533 | if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) { |
978 | dev_warn(&pdev->dev, "In-correct number of interrupts: %d, should be %d\n", | 1534 | dev_warn(&pdev->dev, "In-correct number of interrupts: %d, should be %d\n", |
979 | i, CCI_PMU_MAX_HW_EVENTS); | 1535 | i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)); |
980 | return -EINVAL; | 1536 | return -EINVAL; |
981 | } | 1537 | } |
982 | 1538 | ||
983 | raw_spin_lock_init(&pmu->hw_events.pmu_lock); | 1539 | raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); |
984 | mutex_init(&pmu->reserve_mutex); | 1540 | mutex_init(&cci_pmu->reserve_mutex); |
985 | atomic_set(&pmu->active_events, 0); | 1541 | atomic_set(&cci_pmu->active_events, 0); |
986 | cpumask_set_cpu(smp_processor_id(), &pmu->cpus); | 1542 | cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus); |
1543 | |||
1544 | cci_pmu->cpu_nb = (struct notifier_block) { | ||
1545 | .notifier_call = cci_pmu_cpu_notifier, | ||
1546 | /* | ||
1547 | * to migrate uncore events, our notifier should be executed | ||
1548 | * before perf core's notifier. | ||
1549 | */ | ||
1550 | .priority = CPU_PRI_PERF + 1, | ||
1551 | }; | ||
987 | 1552 | ||
988 | ret = register_cpu_notifier(&cci_pmu_cpu_nb); | 1553 | ret = register_cpu_notifier(&cci_pmu->cpu_nb); |
989 | if (ret) | 1554 | if (ret) |
990 | return ret; | 1555 | return ret; |
991 | 1556 | ||
992 | ret = cci_pmu_init(pmu, pdev); | 1557 | ret = cci_pmu_init(cci_pmu, pdev); |
993 | if (ret) | 1558 | if (ret) { |
1559 | unregister_cpu_notifier(&cci_pmu->cpu_nb); | ||
994 | return ret; | 1560 | return ret; |
1561 | } | ||
995 | 1562 | ||
996 | pr_info("ARM %s PMU driver probed", pmu->model->name); | 1563 | pr_info("ARM %s PMU driver probed", cci_pmu->model->name); |
997 | return 0; | 1564 | return 0; |
998 | } | 1565 | } |
999 | 1566 | ||
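cci_pmu_alloc() reports failure through an encoded error pointer, so cci_pmu_probe() can pass on -ENODEV ("version not supported") or -ENOMEM without an extra out-parameter, and because every buffer is devm_-managed nothing has to be freed by hand on the error paths. The error-pointer convention in a self-contained sketch (the macros below are simplified stand-ins for the ones in <linux/err.h>):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers. */
    #define MAX_ERRNO       4095
    #define ERR_PTR(err)    ((void *)(long)(err))
    #define IS_ERR(p)       ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
    #define PTR_ERR(p)      ((long)(p))

    struct pmu { int num_cntrs; };

    static struct pmu *pmu_alloc(int supported)
    {
        struct pmu *p;

        if (!supported)
            return ERR_PTR(-ENODEV);    /* unknown model */
        p = calloc(1, sizeof(*p));
        if (!p)
            return ERR_PTR(-ENOMEM);
        return p;
    }

    int main(void)
    {
        struct pmu *p = pmu_alloc(0);

        if (IS_ERR(p)) {                /* one return value carries both cases */
            printf("probe fails with %ld\n", PTR_ERR(p));
            return 1;
        }
        free(p);
        return 0;
    }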
@@ -1032,14 +1599,14 @@ static int __init cci_platform_init(void) | |||
1032 | return platform_driver_register(&cci_platform_driver); | 1599 | return platform_driver_register(&cci_platform_driver); |
1033 | } | 1600 | } |
1034 | 1601 | ||
1035 | #else /* !CONFIG_ARM_CCI400_PMU */ | 1602 | #else /* !CONFIG_ARM_CCI_PMU */ |
1036 | 1603 | ||
1037 | static int __init cci_platform_init(void) | 1604 | static int __init cci_platform_init(void) |
1038 | { | 1605 | { |
1039 | return 0; | 1606 | return 0; |
1040 | } | 1607 | } |
1041 | 1608 | ||
1042 | #endif /* CONFIG_ARM_CCI400_PMU */ | 1609 | #endif /* CONFIG_ARM_CCI_PMU */ |
1043 | 1610 | ||
1044 | #ifdef CONFIG_ARM_CCI400_PORT_CTRL | 1611 | #ifdef CONFIG_ARM_CCI400_PORT_CTRL |
1045 | 1612 | ||
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index aaa0f2a87118..7d9879e166cf 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c | |||
@@ -166,13 +166,17 @@ struct arm_ccn_dt { | |||
166 | 166 | ||
167 | struct hrtimer hrtimer; | 167 | struct hrtimer hrtimer; |
168 | 168 | ||
169 | cpumask_t cpu; | ||
170 | struct notifier_block cpu_nb; | ||
171 | |||
169 | struct pmu pmu; | 172 | struct pmu pmu; |
170 | }; | 173 | }; |
171 | 174 | ||
172 | struct arm_ccn { | 175 | struct arm_ccn { |
173 | struct device *dev; | 176 | struct device *dev; |
174 | void __iomem *base; | 177 | void __iomem *base; |
175 | unsigned irq_used:1; | 178 | unsigned int irq; |
179 | |||
176 | unsigned sbas_present:1; | 180 | unsigned sbas_present:1; |
177 | unsigned sbsx_present:1; | 181 | unsigned sbsx_present:1; |
178 | 182 | ||
@@ -212,7 +216,7 @@ static int arm_ccn_node_to_xp_port(int node) | |||
212 | 216 | ||
213 | static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port) | 217 | static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port) |
214 | { | 218 | { |
215 | *config &= ~((0xff << 0) | (0xff << 8) | (0xff << 24)); | 219 | *config &= ~((0xff << 0) | (0xff << 8) | (0x3 << 24)); |
216 | *config |= (node_xp << 0) | (type << 8) | (port << 24); | 220 | *config |= (node_xp << 0) | (type << 8) | (port << 24); |
217 | } | 221 | } |
218 | 222 | ||
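The narrower mask in arm_ccn_pmu_config_set() matters because the port field only occupies two bits at offset 24 of the event config word; clearing a full byte there risks wiping whatever the encoding packs into the neighbouring bits. A generic sketch of the clear-then-set idiom (field names and widths below are illustrative, not the CCN encoding):

    #include <stdint.h>
    #include <stdio.h>

    /* Field layout used here for illustration: id[7:0], type[15:8], port[25:24]. */
    #define ID_SHIFT    0
    #define TYPE_SHIFT  8
    #define PORT_SHIFT  24

    #define ID_MASK     (0xffu << ID_SHIFT)
    #define TYPE_MASK   (0xffu << TYPE_SHIFT)
    #define PORT_MASK   (0x3u  << PORT_SHIFT)   /* two bits only, as in the fix */

    static void config_set(uint64_t *config, uint32_t id, uint32_t type,
                           uint32_t port)
    {
        /* Clear exactly the fields we own, then OR in the new values. */
        *config &= ~(uint64_t)(ID_MASK | TYPE_MASK | PORT_MASK);
        *config |= ((uint64_t)id << ID_SHIFT) |
                   ((uint64_t)type << TYPE_SHIFT) |
                   ((uint64_t)port << PORT_SHIFT);
    }

    int main(void)
    {
        uint64_t config = 0xdeadbeefULL;    /* bits outside our fields survive */

        config_set(&config, 0x11, 0x08, 0x1);
        printf("config = %#llx\n", (unsigned long long)config);
        return 0;
    }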
@@ -336,6 +340,23 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev, | |||
336 | if (event->mask) | 340 | if (event->mask) |
337 | res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x", | 341 | res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x", |
338 | event->mask); | 342 | event->mask); |
343 | |||
344 | /* Arguments required by an event */ | ||
345 | switch (event->type) { | ||
346 | case CCN_TYPE_CYCLES: | ||
347 | break; | ||
348 | case CCN_TYPE_XP: | ||
349 | res += snprintf(buf + res, PAGE_SIZE - res, | ||
350 | ",xp=?,port=?,vc=?,dir=?"); | ||
351 | if (event->event == CCN_EVENT_WATCHPOINT) | ||
352 | res += snprintf(buf + res, PAGE_SIZE - res, | ||
353 | ",cmp_l=?,cmp_h=?,mask=?"); | ||
354 | break; | ||
355 | default: | ||
356 | res += snprintf(buf + res, PAGE_SIZE - res, ",node=?"); | ||
357 | break; | ||
358 | } | ||
359 | |||
339 | res += snprintf(buf + res, PAGE_SIZE - res, "\n"); | 360 | res += snprintf(buf + res, PAGE_SIZE - res, "\n"); |
340 | 361 | ||
341 | return res; | 362 | return res; |
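The switch appends placeholder arguments to every event description, so listing the events shows which extra fields (node, or xp/port/vc/dir plus the watchpoint comparators) still have to be supplied on the command line. How such a string is assembled, in a reduced standalone form (event types and field names here are examples only, not the CCN config format):

    #include <stddef.h>
    #include <stdio.h>

    enum type { TYPE_CYCLES, TYPE_XP, TYPE_NODE };

    static int describe(char *buf, size_t len, enum type t, unsigned int event)
    {
        int res = snprintf(buf, len, "type=0x%x,event=0x%x", t, event);

        switch (t) {
        case TYPE_CYCLES:               /* needs no extra arguments */
            break;
        case TYPE_XP:                   /* crosspoint events need routing info */
            res += snprintf(buf + res, len - res, ",xp=?,port=?,vc=?,dir=?");
            break;
        default:                        /* node events just need the node id */
            res += snprintf(buf + res, len - res, ",node=?");
            break;
        }
        return res;
    }

    int main(void)
    {
        char buf[128];

        describe(buf, sizeof(buf), TYPE_XP, 0x08);
        printf("%s\n", buf);    /* type=0x1,event=0x8,xp=?,port=?,vc=?,dir=? */
        return 0;
    }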
@@ -521,6 +542,25 @@ static struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = { | |||
521 | .attrs = arm_ccn_pmu_cmp_mask_attrs, | 542 | .attrs = arm_ccn_pmu_cmp_mask_attrs, |
522 | }; | 543 | }; |
523 | 544 | ||
545 | static ssize_t arm_ccn_pmu_cpumask_show(struct device *dev, | ||
546 | struct device_attribute *attr, char *buf) | ||
547 | { | ||
548 | struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); | ||
549 | |||
550 | return cpumap_print_to_pagebuf(true, buf, &ccn->dt.cpu); | ||
551 | } | ||
552 | |||
553 | static struct device_attribute arm_ccn_pmu_cpumask_attr = | ||
554 | __ATTR(cpumask, S_IRUGO, arm_ccn_pmu_cpumask_show, NULL); | ||
555 | |||
556 | static struct attribute *arm_ccn_pmu_cpumask_attrs[] = { | ||
557 | &arm_ccn_pmu_cpumask_attr.attr, | ||
558 | NULL, | ||
559 | }; | ||
560 | |||
561 | static struct attribute_group arm_ccn_pmu_cpumask_attr_group = { | ||
562 | .attrs = arm_ccn_pmu_cpumask_attrs, | ||
563 | }; | ||
524 | 564 | ||
525 | /* | 565 | /* |
526 | * Default poll period is 10ms, which is way over the top anyway, | 566 | * Default poll period is 10ms, which is way over the top anyway, |
@@ -542,6 +582,7 @@ static const struct attribute_group *arm_ccn_pmu_attr_groups[] = { | |||
542 | &arm_ccn_pmu_events_attr_group, | 582 | &arm_ccn_pmu_events_attr_group, |
543 | &arm_ccn_pmu_format_attr_group, | 583 | &arm_ccn_pmu_format_attr_group, |
544 | &arm_ccn_pmu_cmp_mask_attr_group, | 584 | &arm_ccn_pmu_cmp_mask_attr_group, |
585 | &arm_ccn_pmu_cpumask_attr_group, | ||
545 | NULL | 586 | NULL |
546 | }; | 587 | }; |
547 | 588 | ||
@@ -587,7 +628,65 @@ static int arm_ccn_pmu_type_eq(u32 a, u32 b) | |||
587 | return 0; | 628 | return 0; |
588 | } | 629 | } |
589 | 630 | ||
590 | static void arm_ccn_pmu_event_destroy(struct perf_event *event) | 631 | static int arm_ccn_pmu_event_alloc(struct perf_event *event) |
632 | { | ||
633 | struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); | ||
634 | struct hw_perf_event *hw = &event->hw; | ||
635 | u32 node_xp, type, event_id; | ||
636 | struct arm_ccn_component *source; | ||
637 | int bit; | ||
638 | |||
639 | node_xp = CCN_CONFIG_NODE(event->attr.config); | ||
640 | type = CCN_CONFIG_TYPE(event->attr.config); | ||
641 | event_id = CCN_CONFIG_EVENT(event->attr.config); | ||
642 | |||
643 | /* Allocate the cycle counter */ | ||
644 | if (type == CCN_TYPE_CYCLES) { | ||
645 | if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER, | ||
646 | ccn->dt.pmu_counters_mask)) | ||
647 | return -EAGAIN; | ||
648 | |||
649 | hw->idx = CCN_IDX_PMU_CYCLE_COUNTER; | ||
650 | ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event; | ||
651 | |||
652 | return 0; | ||
653 | } | ||
654 | |||
655 | /* Allocate an event counter */ | ||
656 | hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask, | ||
657 | CCN_NUM_PMU_EVENT_COUNTERS); | ||
658 | if (hw->idx < 0) { | ||
659 | dev_dbg(ccn->dev, "No more counters available!\n"); | ||
660 | return -EAGAIN; | ||
661 | } | ||
662 | |||
663 | if (type == CCN_TYPE_XP) | ||
664 | source = &ccn->xp[node_xp]; | ||
665 | else | ||
666 | source = &ccn->node[node_xp]; | ||
667 | ccn->dt.pmu_counters[hw->idx].source = source; | ||
668 | |||
669 | /* Allocate an event source or a watchpoint */ | ||
670 | if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT) | ||
671 | bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask, | ||
672 | CCN_NUM_XP_WATCHPOINTS); | ||
673 | else | ||
674 | bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask, | ||
675 | CCN_NUM_PMU_EVENTS); | ||
676 | if (bit < 0) { | ||
677 | dev_dbg(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n", | ||
678 | node_xp); | ||
679 | clear_bit(hw->idx, ccn->dt.pmu_counters_mask); | ||
680 | return -EAGAIN; | ||
681 | } | ||
682 | hw->config_base = bit; | ||
683 | |||
684 | ccn->dt.pmu_counters[hw->idx].event = event; | ||
685 | |||
686 | return 0; | ||
687 | } | ||
688 | |||
689 | static void arm_ccn_pmu_event_release(struct perf_event *event) | ||
591 | { | 690 | { |
592 | struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); | 691 | struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); |
593 | struct hw_perf_event *hw = &event->hw; | 692 | struct hw_perf_event *hw = &event->hw; |
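Splitting the old event_destroy path into arm_ccn_pmu_event_alloc() and arm_ccn_pmu_event_release() lets the counter and its event source be claimed in ->add() and given back in ->del(), instead of being held from event_init until the event is destroyed. The claim/release pairing as a toy sketch (plain booleans here; the driver uses test_and_set_bit/clear_bit on its counter mask):

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_COUNTERS 8

    static bool counter_used[NUM_COUNTERS];

    static int event_add(void)              /* claim a counter, may fail */
    {
        for (int i = 0; i < NUM_COUNTERS; i++)
            if (!counter_used[i]) {
                counter_used[i] = true;
                return i;
            }
        return -1;                          /* -EAGAIN in the driver */
    }

    static void event_del(int idx)          /* release what add() claimed */
    {
        counter_used[idx] = false;
    }

    int main(void)
    {
        int idx = event_add();

        printf("scheduled on counter %d\n", idx);
        event_del(idx);                     /* counter is free again */
        return 0;
    }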
@@ -616,15 +715,14 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) | |||
616 | struct arm_ccn *ccn; | 715 | struct arm_ccn *ccn; |
617 | struct hw_perf_event *hw = &event->hw; | 716 | struct hw_perf_event *hw = &event->hw; |
618 | u32 node_xp, type, event_id; | 717 | u32 node_xp, type, event_id; |
619 | int valid, bit; | 718 | int valid; |
620 | struct arm_ccn_component *source; | ||
621 | int i; | 719 | int i; |
720 | struct perf_event *sibling; | ||
622 | 721 | ||
623 | if (event->attr.type != event->pmu->type) | 722 | if (event->attr.type != event->pmu->type) |
624 | return -ENOENT; | 723 | return -ENOENT; |
625 | 724 | ||
626 | ccn = pmu_to_arm_ccn(event->pmu); | 725 | ccn = pmu_to_arm_ccn(event->pmu); |
627 | event->destroy = arm_ccn_pmu_event_destroy; | ||
628 | 726 | ||
629 | if (hw->sample_period) { | 727 | if (hw->sample_period) { |
630 | dev_warn(ccn->dev, "Sampling not supported!\n"); | 728 | dev_warn(ccn->dev, "Sampling not supported!\n"); |
@@ -642,6 +740,16 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) | |||
642 | dev_warn(ccn->dev, "Can't provide per-task data!\n"); | 740 | dev_warn(ccn->dev, "Can't provide per-task data!\n"); |
643 | return -EOPNOTSUPP; | 741 | return -EOPNOTSUPP; |
644 | } | 742 | } |
743 | /* | ||
744 | * Many perf core operations (eg. events rotation) operate on a | ||
745 | * single CPU context. This is obvious for CPU PMUs, where one | ||
746 | * expects the same sets of events being observed on all CPUs, | ||
747 | * but can lead to issues for off-core PMUs, like CCN, where each | ||
748 | * event could be theoretically assigned to a different CPU. To | ||
749 | * mitigate this, we enforce CPU assignment to one, selected | ||
750 | * processor (the one described in the "cpumask" attribute). | ||
751 | */ | ||
752 | event->cpu = cpumask_first(&ccn->dt.cpu); | ||
645 | 753 | ||
646 | node_xp = CCN_CONFIG_NODE(event->attr.config); | 754 | node_xp = CCN_CONFIG_NODE(event->attr.config); |
647 | type = CCN_CONFIG_TYPE(event->attr.config); | 755 | type = CCN_CONFIG_TYPE(event->attr.config); |
@@ -711,48 +819,20 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) | |||
711 | node_xp, type, port); | 819 | node_xp, type, port); |
712 | } | 820 | } |
713 | 821 | ||
714 | /* Allocate the cycle counter */ | 822 | /* |
715 | if (type == CCN_TYPE_CYCLES) { | 823 | * We must NOT create groups containing mixed PMUs, although software |
716 | if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER, | 824 | * events are acceptable (for example to create a CCN group |
717 | ccn->dt.pmu_counters_mask)) | 825 | * periodically read when a hrtimer aka cpu-clock leader triggers). |
718 | return -EAGAIN; | 826 | */ |
719 | 827 | if (event->group_leader->pmu != event->pmu && | |
720 | hw->idx = CCN_IDX_PMU_CYCLE_COUNTER; | 828 | !is_software_event(event->group_leader)) |
721 | ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event; | 829 | return -EINVAL; |
722 | |||
723 | return 0; | ||
724 | } | ||
725 | |||
726 | /* Allocate an event counter */ | ||
727 | hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask, | ||
728 | CCN_NUM_PMU_EVENT_COUNTERS); | ||
729 | if (hw->idx < 0) { | ||
730 | dev_warn(ccn->dev, "No more counters available!\n"); | ||
731 | return -EAGAIN; | ||
732 | } | ||
733 | |||
734 | if (type == CCN_TYPE_XP) | ||
735 | source = &ccn->xp[node_xp]; | ||
736 | else | ||
737 | source = &ccn->node[node_xp]; | ||
738 | ccn->dt.pmu_counters[hw->idx].source = source; | ||
739 | |||
740 | /* Allocate an event source or a watchpoint */ | ||
741 | if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT) | ||
742 | bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask, | ||
743 | CCN_NUM_XP_WATCHPOINTS); | ||
744 | else | ||
745 | bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask, | ||
746 | CCN_NUM_PMU_EVENTS); | ||
747 | if (bit < 0) { | ||
748 | dev_warn(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n", | ||
749 | node_xp); | ||
750 | clear_bit(hw->idx, ccn->dt.pmu_counters_mask); | ||
751 | return -EAGAIN; | ||
752 | } | ||
753 | hw->config_base = bit; | ||
754 | 830 | ||
755 | ccn->dt.pmu_counters[hw->idx].event = event; | 831 | list_for_each_entry(sibling, &event->group_leader->sibling_list, |
832 | group_entry) | ||
833 | if (sibling->pmu != event->pmu && | ||
834 | !is_software_event(sibling)) | ||
835 | return -EINVAL; | ||
756 | 836 | ||
757 | return 0; | 837 | return 0; |
758 | } | 838 | } |
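The new group checks allow software events in a CCN group (so a cpu-clock leader can drive periodic reads) but reject siblings that belong to any other hardware PMU, since those counters cannot be co-scheduled with CCN's. The predicate on its own, over a plain array instead of perf's sibling list (illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    enum pmu_id { PMU_SOFTWARE, PMU_CCN, PMU_CPU };

    struct event { enum pmu_id pmu; };

    /* Valid iff every member is either on our PMU or a software event. */
    static bool group_ok(enum pmu_id ours, const struct event *ev, int n)
    {
        for (int i = 0; i < n; i++)
            if (ev[i].pmu != ours && ev[i].pmu != PMU_SOFTWARE)
                return false;
        return true;
    }

    int main(void)
    {
        struct event good[] = { { PMU_SOFTWARE }, { PMU_CCN }, { PMU_CCN } };
        struct event bad[]  = { { PMU_CPU }, { PMU_CCN } };

        printf("%d %d\n", group_ok(PMU_CCN, good, 3), group_ok(PMU_CCN, bad, 2));
        return 0;
    }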
@@ -835,9 +915,14 @@ static void arm_ccn_pmu_event_start(struct perf_event *event, int flags) | |||
835 | arm_ccn_pmu_read_counter(ccn, hw->idx)); | 915 | arm_ccn_pmu_read_counter(ccn, hw->idx)); |
836 | hw->state = 0; | 916 | hw->state = 0; |
837 | 917 | ||
838 | if (!ccn->irq_used) | 918 | /* |
919 | * Pin the timer, so that the overflows are handled by the chosen | ||
920 | * event->cpu (this is the same one as presented in "cpumask" | ||
921 | * attribute). | ||
922 | */ | ||
923 | if (!ccn->irq) | ||
839 | hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(), | 924 | hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(), |
840 | HRTIMER_MODE_REL); | 925 | HRTIMER_MODE_REL_PINNED); |
841 | 926 | ||
842 | /* Set the DT bus input, engaging the counter */ | 927 | /* Set the DT bus input, engaging the counter */ |
843 | arm_ccn_pmu_xp_dt_config(event, 1); | 928 | arm_ccn_pmu_xp_dt_config(event, 1); |
@@ -852,7 +937,7 @@ static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags) | |||
852 | /* Disable counting, setting the DT bus to pass-through mode */ | 937 | /* Disable counting, setting the DT bus to pass-through mode */ |
853 | arm_ccn_pmu_xp_dt_config(event, 0); | 938 | arm_ccn_pmu_xp_dt_config(event, 0); |
854 | 939 | ||
855 | if (!ccn->irq_used) | 940 | if (!ccn->irq) |
856 | hrtimer_cancel(&ccn->dt.hrtimer); | 941 | hrtimer_cancel(&ccn->dt.hrtimer); |
857 | 942 | ||
858 | /* Let the DT bus drain */ | 943 | /* Let the DT bus drain */ |
@@ -1014,8 +1099,13 @@ static void arm_ccn_pmu_event_config(struct perf_event *event) | |||
1014 | 1099 | ||
1015 | static int arm_ccn_pmu_event_add(struct perf_event *event, int flags) | 1100 | static int arm_ccn_pmu_event_add(struct perf_event *event, int flags) |
1016 | { | 1101 | { |
1102 | int err; | ||
1017 | struct hw_perf_event *hw = &event->hw; | 1103 | struct hw_perf_event *hw = &event->hw; |
1018 | 1104 | ||
1105 | err = arm_ccn_pmu_event_alloc(event); | ||
1106 | if (err) | ||
1107 | return err; | ||
1108 | |||
1019 | arm_ccn_pmu_event_config(event); | 1109 | arm_ccn_pmu_event_config(event); |
1020 | 1110 | ||
1021 | hw->state = PERF_HES_STOPPED; | 1111 | hw->state = PERF_HES_STOPPED; |
@@ -1029,6 +1119,8 @@ static int arm_ccn_pmu_event_add(struct perf_event *event, int flags) | |||
1029 | static void arm_ccn_pmu_event_del(struct perf_event *event, int flags) | 1119 | static void arm_ccn_pmu_event_del(struct perf_event *event, int flags) |
1030 | { | 1120 | { |
1031 | arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE); | 1121 | arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE); |
1122 | |||
1123 | arm_ccn_pmu_event_release(event); | ||
1032 | } | 1124 | } |
1033 | 1125 | ||
1034 | static void arm_ccn_pmu_event_read(struct perf_event *event) | 1126 | static void arm_ccn_pmu_event_read(struct perf_event *event) |
@@ -1079,12 +1171,39 @@ static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer) | |||
1079 | } | 1171 | } |
1080 | 1172 | ||
1081 | 1173 | ||
1174 | static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb, | ||
1175 | unsigned long action, void *hcpu) | ||
1176 | { | ||
1177 | struct arm_ccn_dt *dt = container_of(nb, struct arm_ccn_dt, cpu_nb); | ||
1178 | struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt); | ||
1179 | unsigned int cpu = (long)hcpu; /* for (long) see kernel/cpu.c */ | ||
1180 | unsigned int target; | ||
1181 | |||
1182 | switch (action & ~CPU_TASKS_FROZEN) { | ||
1183 | case CPU_DOWN_PREPARE: | ||
1184 | if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu)) | ||
1185 | break; | ||
1186 | target = cpumask_any_but(cpu_online_mask, cpu); | ||
1187 | if (target < 0) | ||
1188 | break; | ||
1189 | perf_pmu_migrate_context(&dt->pmu, cpu, target); | ||
1190 | cpumask_set_cpu(target, &dt->cpu); | ||
1191 | WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0); | ||
1192 | default: | ||
1193 | break; | ||
1194 | } | ||
1195 | |||
1196 | return NOTIFY_OK; | ||
1197 | } | ||
1198 | |||
1199 | |||
1082 | static DEFINE_IDA(arm_ccn_pmu_ida); | 1200 | static DEFINE_IDA(arm_ccn_pmu_ida); |
1083 | 1201 | ||
1084 | static int arm_ccn_pmu_init(struct arm_ccn *ccn) | 1202 | static int arm_ccn_pmu_init(struct arm_ccn *ccn) |
1085 | { | 1203 | { |
1086 | int i; | 1204 | int i; |
1087 | char *name; | 1205 | char *name; |
1206 | int err; | ||
1088 | 1207 | ||
1089 | /* Initialize DT subsystem */ | 1208 | /* Initialize DT subsystem */ |
1090 | ccn->dt.base = ccn->base + CCN_REGION_SIZE; | 1209 | ccn->dt.base = ccn->base + CCN_REGION_SIZE; |
@@ -1136,20 +1255,58 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) | |||
1136 | }; | 1255 | }; |
1137 | 1256 | ||
1138 | /* No overflow interrupt? Have to use a timer instead. */ | 1257 | /* No overflow interrupt? Have to use a timer instead. */ |
1139 | if (!ccn->irq_used) { | 1258 | if (!ccn->irq) { |
1140 | dev_info(ccn->dev, "No access to interrupts, using timer.\n"); | 1259 | dev_info(ccn->dev, "No access to interrupts, using timer.\n"); |
1141 | hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC, | 1260 | hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC, |
1142 | HRTIMER_MODE_REL); | 1261 | HRTIMER_MODE_REL); |
1143 | ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler; | 1262 | ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler; |
1144 | } | 1263 | } |
1145 | 1264 | ||
1146 | return perf_pmu_register(&ccn->dt.pmu, name, -1); | 1265 | /* Pick one CPU which we will use to collect data from CCN... */ |
1266 | cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu); | ||
1267 | |||
1268 | /* | ||
1269 | * ... and change the selection when it goes offline. Priority is | ||
1270 | * picked to have a chance to migrate events before perf is notified. | ||
1271 | */ | ||
1272 | ccn->dt.cpu_nb.notifier_call = arm_ccn_pmu_cpu_notifier; | ||
1273 | ccn->dt.cpu_nb.priority = CPU_PRI_PERF + 1, | ||
1274 | err = register_cpu_notifier(&ccn->dt.cpu_nb); | ||
1275 | if (err) | ||
1276 | goto error_cpu_notifier; | ||
1277 | |||
1278 | /* Also make sure that the overflow interrupt is handled by this CPU */ | ||
1279 | if (ccn->irq) { | ||
1280 | err = irq_set_affinity(ccn->irq, &ccn->dt.cpu); | ||
1281 | if (err) { | ||
1282 | dev_err(ccn->dev, "Failed to set interrupt affinity!\n"); | ||
1283 | goto error_set_affinity; | ||
1284 | } | ||
1285 | } | ||
1286 | |||
1287 | err = perf_pmu_register(&ccn->dt.pmu, name, -1); | ||
1288 | if (err) | ||
1289 | goto error_pmu_register; | ||
1290 | |||
1291 | return 0; | ||
1292 | |||
1293 | error_pmu_register: | ||
1294 | error_set_affinity: | ||
1295 | unregister_cpu_notifier(&ccn->dt.cpu_nb); | ||
1296 | error_cpu_notifier: | ||
1297 | ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); | ||
1298 | for (i = 0; i < ccn->num_xps; i++) | ||
1299 | writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); | ||
1300 | writel(0, ccn->dt.base + CCN_DT_PMCR); | ||
1301 | return err; | ||
1147 | } | 1302 | } |
1148 | 1303 | ||
1149 | static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn) | 1304 | static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn) |
1150 | { | 1305 | { |
1151 | int i; | 1306 | int i; |
1152 | 1307 | ||
1308 | irq_set_affinity(ccn->irq, cpu_possible_mask); | ||
1309 | unregister_cpu_notifier(&ccn->dt.cpu_nb); | ||
1153 | for (i = 0; i < ccn->num_xps; i++) | 1310 | for (i = 0; i < ccn->num_xps; i++) |
1154 | writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); | 1311 | writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); |
1155 | writel(0, ccn->dt.base + CCN_DT_PMCR); | 1312 | writel(0, ccn->dt.base + CCN_DT_PMCR); |
@@ -1285,6 +1442,7 @@ static int arm_ccn_probe(struct platform_device *pdev) | |||
1285 | { | 1442 | { |
1286 | struct arm_ccn *ccn; | 1443 | struct arm_ccn *ccn; |
1287 | struct resource *res; | 1444 | struct resource *res; |
1445 | unsigned int irq; | ||
1288 | int err; | 1446 | int err; |
1289 | 1447 | ||
1290 | ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL); | 1448 | ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL); |
@@ -1309,6 +1467,7 @@ static int arm_ccn_probe(struct platform_device *pdev) | |||
1309 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 1467 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
1310 | if (!res) | 1468 | if (!res) |
1311 | return -EINVAL; | 1469 | return -EINVAL; |
1470 | irq = res->start; | ||
1312 | 1471 | ||
1313 | /* Check if we can use the interrupt */ | 1472 | /* Check if we can use the interrupt */ |
1314 | writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE, | 1473 | writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE, |
@@ -1318,13 +1477,12 @@ static int arm_ccn_probe(struct platform_device *pdev) | |||
1318 | /* Can set 'disable' bits, so can acknowledge interrupts */ | 1477 | /* Can set 'disable' bits, so can acknowledge interrupts */ |
1319 | writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE, | 1478 | writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE, |
1320 | ccn->base + CCN_MN_ERRINT_STATUS); | 1479 | ccn->base + CCN_MN_ERRINT_STATUS); |
1321 | err = devm_request_irq(ccn->dev, res->start, | 1480 | err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler, 0, |
1322 | arm_ccn_irq_handler, 0, dev_name(ccn->dev), | 1481 | dev_name(ccn->dev), ccn); |
1323 | ccn); | ||
1324 | if (err) | 1482 | if (err) |
1325 | return err; | 1483 | return err; |
1326 | 1484 | ||
1327 | ccn->irq_used = 1; | 1485 | ccn->irq = irq; |
1328 | } | 1486 | } |
1329 | 1487 | ||
1330 | 1488 | ||
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index 6f047dcb94c2..c43c3d2baf73 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c | |||
@@ -57,6 +57,7 @@ | |||
57 | #include <linux/of_address.h> | 57 | #include <linux/of_address.h> |
58 | #include <linux/debugfs.h> | 58 | #include <linux/debugfs.h> |
59 | #include <linux/log2.h> | 59 | #include <linux/log2.h> |
60 | #include <linux/memblock.h> | ||
60 | #include <linux/syscore_ops.h> | 61 | #include <linux/syscore_ops.h> |
61 | 62 | ||
62 | /* | 63 | /* |
@@ -152,13 +153,39 @@ struct mvebu_mbus_state { | |||
152 | 153 | ||
153 | static struct mvebu_mbus_state mbus_state; | 154 | static struct mvebu_mbus_state mbus_state; |
154 | 155 | ||
156 | /* | ||
157 | * We provide two variants of the mv_mbus_dram_info() function: | ||
158 | * | ||
159 | * - The normal one, where the described DRAM ranges may overlap with | ||
160 | * the I/O windows, but for which the DRAM ranges are guaranteed to | ||
161 | * have a power of two size. Such ranges are suitable for the DMA | ||
162 | * masters that only DMA between the RAM and the device, which is | ||
163 | * actually all devices except the crypto engines. | ||
164 | * | ||
165 | * - The 'nooverlap' one, where the described DRAM ranges are | ||
166 | * guaranteed to not overlap with the I/O windows, but for which the | ||
167 | * DRAM ranges will not have power of two sizes. They will only be | ||
168 | * aligned on a 64 KB boundary, and have a size multiple of 64 | ||
169 | * KB. Such ranges are suitable for the DMA masters that DMA between | ||
170 | * the crypto SRAM (which is mapped through an I/O window) and a | ||
171 | * device. This is the case for the crypto engines. | ||
172 | */ | ||
173 | |||
155 | static struct mbus_dram_target_info mvebu_mbus_dram_info; | 174 | static struct mbus_dram_target_info mvebu_mbus_dram_info; |
175 | static struct mbus_dram_target_info mvebu_mbus_dram_info_nooverlap; | ||
176 | |||
156 | const struct mbus_dram_target_info *mv_mbus_dram_info(void) | 177 | const struct mbus_dram_target_info *mv_mbus_dram_info(void) |
157 | { | 178 | { |
158 | return &mvebu_mbus_dram_info; | 179 | return &mvebu_mbus_dram_info; |
159 | } | 180 | } |
160 | EXPORT_SYMBOL_GPL(mv_mbus_dram_info); | 181 | EXPORT_SYMBOL_GPL(mv_mbus_dram_info); |
161 | 182 | ||
183 | const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void) | ||
184 | { | ||
185 | return &mvebu_mbus_dram_info_nooverlap; | ||
186 | } | ||
187 | EXPORT_SYMBOL_GPL(mv_mbus_dram_info_nooverlap); | ||
188 | |||
162 | /* Checks whether the given window has remap capability */ | 189 | /* Checks whether the given window has remap capability */ |
163 | static bool mvebu_mbus_window_is_remappable(struct mvebu_mbus_state *mbus, | 190 | static bool mvebu_mbus_window_is_remappable(struct mvebu_mbus_state *mbus, |
164 | const int win) | 191 | const int win) |
@@ -576,6 +603,95 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win) | |||
576 | return MVEBU_MBUS_NO_REMAP; | 603 | return MVEBU_MBUS_NO_REMAP; |
577 | } | 604 | } |
578 | 605 | ||
606 | /* | ||
607 | * Use the memblock information to find the MBus bridge hole in the | ||
608 | * physical address space. | ||
609 | */ | ||
610 | static void __init | ||
611 | mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end) | ||
612 | { | ||
613 | struct memblock_region *r; | ||
614 | uint64_t s = 0; | ||
615 | |||
616 | for_each_memblock(memory, r) { | ||
617 | /* | ||
618 | * This part of the memory is above 4 GB, so we don't | ||
619 | * care for the MBus bridge hole. | ||
620 | */ | ||
621 | if (r->base >= 0x100000000ULL) | ||
622 | continue; | ||
623 | |||
624 | /* | ||
625 | * The MBus bridge hole is at the end of the RAM under | ||
626 | * the 4 GB limit. | ||
627 | */ | ||
628 | if (r->base + r->size > s) | ||
629 | s = r->base + r->size; | ||
630 | } | ||
631 | |||
632 | *start = s; | ||
633 | *end = 0x100000000ULL; | ||
634 | } | ||
635 | |||
636 | /* | ||
637 | * This function fills in the mvebu_mbus_dram_info_nooverlap data | ||
638 | * structure, by looking at the mvebu_mbus_dram_info data, and | ||
639 | * removing the parts of it that overlap with I/O windows. | ||
640 | */ | ||
641 | static void __init | ||
642 | mvebu_mbus_setup_cpu_target_nooverlap(struct mvebu_mbus_state *mbus) | ||
643 | { | ||
644 | uint64_t mbus_bridge_base, mbus_bridge_end; | ||
645 | int cs_nooverlap = 0; | ||
646 | int i; | ||
647 | |||
648 | mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end); | ||
649 | |||
650 | for (i = 0; i < mvebu_mbus_dram_info.num_cs; i++) { | ||
651 | struct mbus_dram_window *w; | ||
652 | u64 base, size, end; | ||
653 | |||
654 | w = &mvebu_mbus_dram_info.cs[i]; | ||
655 | base = w->base; | ||
656 | size = w->size; | ||
657 | end = base + size; | ||
658 | |||
659 | /* | ||
660 | * The CS is fully enclosed inside the MBus bridge | ||
661 | * area, so ignore it. | ||
662 | */ | ||
663 | if (base >= mbus_bridge_base && end <= mbus_bridge_end) | ||
664 | continue; | ||
665 | |||
666 | /* | ||
667 | * Beginning of CS overlaps with end of MBus, raise CS | ||
668 | * base address, and shrink its size. | ||
669 | */ | ||
670 | if (base >= mbus_bridge_base && end > mbus_bridge_end) { | ||
671 | size -= mbus_bridge_end - base; | ||
672 | base = mbus_bridge_end; | ||
673 | } | ||
674 | |||
675 | /* | ||
676 | * End of CS overlaps with beginning of MBus, shrink | ||
677 | * CS size. | ||
678 | */ | ||
679 | if (base < mbus_bridge_base && end > mbus_bridge_base) | ||
680 | size -= end - mbus_bridge_base; | ||
681 | |||
682 | w = &mvebu_mbus_dram_info_nooverlap.cs[cs_nooverlap++]; | ||
683 | w->cs_index = i; | ||
684 | w->mbus_attr = 0xf & ~(1 << i); | ||
685 | if (mbus->hw_io_coherency) | ||
686 | w->mbus_attr |= ATTR_HW_COHERENCY; | ||
687 | w->base = base; | ||
688 | w->size = size; | ||
689 | } | ||
690 | |||
691 | mvebu_mbus_dram_info_nooverlap.mbus_dram_target_id = TARGET_DDR; | ||
692 | mvebu_mbus_dram_info_nooverlap.num_cs = cs_nooverlap; | ||
693 | } | ||
694 | |||
579 | static void __init | 695 | static void __init |
580 | mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus) | 696 | mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus) |
581 | { | 697 | { |
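mvebu_mbus_setup_cpu_target_nooverlap() shrinks or drops each DRAM chip-select window so that nothing it describes falls inside the MBus bridge hole below 4 GB. The interval arithmetic on its own, as a small standalone example (the addresses are made up: a 4 GB chip-select window trimmed against a hole that starts at 3 GB):

    #include <inttypes.h>
    #include <stdio.h>

    /* Trim [base, base+size) against a hole [h_start, h_end).
     * Returns 0 if the window disappears entirely. */
    static int trim(uint64_t *base, uint64_t *size,
                    uint64_t h_start, uint64_t h_end)
    {
        uint64_t end = *base + *size;

        if (*base >= h_start && end <= h_end)
            return 0;                       /* fully inside the hole: drop it */
        if (*base >= h_start && end > h_end) {  /* head overlaps: raise the base */
            *size -= h_end - *base;
            *base = h_end;
        }
        if (*base < h_start && end > h_start)   /* tail overlaps: shrink the size */
            *size -= end - h_start;
        return 1;
    }

    int main(void)
    {
        /* 3 GB of RAM described by a 4 GB window; the hole covers [3 GB, 4 GB). */
        uint64_t base = 0, size = 0x100000000ULL;
        uint64_t hole_start = 0xc0000000ULL, hole_end = 0x100000000ULL;

        if (trim(&base, &size, hole_start, hole_end))
            printf("usable: base=%#" PRIx64 " size=%#" PRIx64 "\n", base, size);
        return 0;
    }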
@@ -964,6 +1080,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus, | |||
964 | mvebu_mbus_disable_window(mbus, win); | 1080 | mvebu_mbus_disable_window(mbus, win); |
965 | 1081 | ||
966 | mbus->soc->setup_cpu_target(mbus); | 1082 | mbus->soc->setup_cpu_target(mbus); |
1083 | mvebu_mbus_setup_cpu_target_nooverlap(mbus); | ||
967 | 1084 | ||
968 | if (is_coherent) | 1085 | if (is_coherent) |
969 | writel(UNIT_SYNC_BARRIER_ALL, | 1086 | writel(UNIT_SYNC_BARRIER_ALL, |
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c index 515fb133495c..73153fc45ee9 100644 --- a/drivers/clk/berlin/bg2.c +++ b/drivers/clk/berlin/bg2.c | |||
@@ -502,12 +502,13 @@ static const struct berlin2_gate_data bg2_gates[] __initconst = { | |||
502 | 502 | ||
503 | static void __init berlin2_clock_setup(struct device_node *np) | 503 | static void __init berlin2_clock_setup(struct device_node *np) |
504 | { | 504 | { |
505 | struct device_node *parent_np = of_get_parent(np); | ||
505 | const char *parent_names[9]; | 506 | const char *parent_names[9]; |
506 | struct clk *clk; | 507 | struct clk *clk; |
507 | u8 avpll_flags = 0; | 508 | u8 avpll_flags = 0; |
508 | int n; | 509 | int n; |
509 | 510 | ||
510 | gbase = of_iomap(np, 0); | 511 | gbase = of_iomap(parent_np, 0); |
511 | if (!gbase) | 512 | if (!gbase) |
512 | return; | 513 | return; |
513 | 514 | ||
@@ -685,7 +686,5 @@ static void __init berlin2_clock_setup(struct device_node *np) | |||
685 | bg2_fail: | 686 | bg2_fail: |
686 | iounmap(gbase); | 687 | iounmap(gbase); |
687 | } | 688 | } |
688 | CLK_OF_DECLARE(berlin2_clock, "marvell,berlin2-chip-ctrl", | 689 | CLK_OF_DECLARE(berlin2_clk, "marvell,berlin2-clk", |
689 | berlin2_clock_setup); | ||
690 | CLK_OF_DECLARE(berlin2cd_clock, "marvell,berlin2cd-chip-ctrl", | ||
691 | berlin2_clock_setup); | 690 | berlin2_clock_setup); |
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c index 440ef81ab15c..221f40c2b850 100644 --- a/drivers/clk/berlin/bg2q.c +++ b/drivers/clk/berlin/bg2q.c | |||
@@ -290,18 +290,19 @@ static const struct berlin2_gate_data bg2q_gates[] __initconst = { | |||
290 | 290 | ||
291 | static void __init berlin2q_clock_setup(struct device_node *np) | 291 | static void __init berlin2q_clock_setup(struct device_node *np) |
292 | { | 292 | { |
293 | struct device_node *parent_np = of_get_parent(np); | ||
293 | const char *parent_names[9]; | 294 | const char *parent_names[9]; |
294 | struct clk *clk; | 295 | struct clk *clk; |
295 | int n; | 296 | int n; |
296 | 297 | ||
297 | gbase = of_iomap(np, 0); | 298 | gbase = of_iomap(parent_np, 0); |
298 | if (!gbase) { | 299 | if (!gbase) { |
299 | pr_err("%s: Unable to map global base\n", np->full_name); | 300 | pr_err("%s: Unable to map global base\n", np->full_name); |
300 | return; | 301 | return; |
301 | } | 302 | } |
302 | 303 | ||
303 | /* BG2Q CPU PLL is not part of global registers */ | 304 | /* BG2Q CPU PLL is not part of global registers */ |
304 | cpupll_base = of_iomap(np, 1); | 305 | cpupll_base = of_iomap(parent_np, 1); |
305 | if (!cpupll_base) { | 306 | if (!cpupll_base) { |
306 | pr_err("%s: Unable to map cpupll base\n", np->full_name); | 307 | pr_err("%s: Unable to map cpupll base\n", np->full_name); |
307 | iounmap(gbase); | 308 | iounmap(gbase); |
@@ -384,5 +385,5 @@ bg2q_fail: | |||
384 | iounmap(cpupll_base); | 385 | iounmap(cpupll_base); |
385 | iounmap(gbase); | 386 | iounmap(gbase); |
386 | } | 387 | } |
387 | CLK_OF_DECLARE(berlin2q_clock, "marvell,berlin2q-chip-ctrl", | 388 | CLK_OF_DECLARE(berlin2q_clk, "marvell,berlin2q-clk", |
388 | berlin2q_clock_setup); | 389 | berlin2q_clock_setup); |
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 3fdd3912709a..3001f1ae1062 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile | |||
@@ -12,7 +12,8 @@ obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o | |||
12 | obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o | 12 | obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o |
13 | obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o | 13 | obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o |
14 | obj-$(CONFIG_QCOM_SCM) += qcom_scm.o | 14 | obj-$(CONFIG_QCOM_SCM) += qcom_scm.o |
15 | CFLAGS_qcom_scm.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1) | 15 | obj-$(CONFIG_QCOM_SCM) += qcom_scm-32.o |
16 | CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1) | ||
16 | 17 | ||
17 | obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ | 18 | obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ |
18 | obj-$(CONFIG_EFI) += efi/ | 19 | obj-$(CONFIG_EFI) += efi/ |
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c new file mode 100644 index 000000000000..1bd6f9c34331 --- /dev/null +++ b/drivers/firmware/qcom_scm-32.c | |||
@@ -0,0 +1,503 @@ | |||
1 | /* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved. | ||
2 | * Copyright (C) 2015 Linaro Ltd. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
16 | * 02110-1301, USA. | ||
17 | */ | ||
18 | |||
19 | #include <linux/slab.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/mutex.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/err.h> | ||
25 | #include <linux/qcom_scm.h> | ||
26 | |||
27 | #include <asm/outercache.h> | ||
28 | #include <asm/cacheflush.h> | ||
29 | |||
30 | #include "qcom_scm.h" | ||
31 | |||
32 | #define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00 | ||
33 | #define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01 | ||
34 | #define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08 | ||
35 | #define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20 | ||
36 | |||
37 | #define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04 | ||
38 | #define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02 | ||
39 | #define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10 | ||
40 | #define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40 | ||
41 | |||
42 | struct qcom_scm_entry { | ||
43 | int flag; | ||
44 | void *entry; | ||
45 | }; | ||
46 | |||
47 | static struct qcom_scm_entry qcom_scm_wb[] = { | ||
48 | { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 }, | ||
49 | { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 }, | ||
50 | { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 }, | ||
51 | { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 }, | ||
52 | }; | ||
53 | |||
54 | static DEFINE_MUTEX(qcom_scm_lock); | ||
55 | |||
56 | /** | ||
57 | * struct qcom_scm_command - one SCM command buffer | ||
58 | * @len: total available memory for command and response | ||
59 | * @buf_offset: start of command buffer | ||
60 | * @resp_hdr_offset: start of response buffer | ||
61 | * @id: command to be executed | ||
62 | * @buf: buffer returned from qcom_scm_get_command_buffer() | ||
63 | * | ||
64 | * An SCM command is laid out in memory as follows: | ||
65 | * | ||
66 | * ------------------- <--- struct qcom_scm_command | ||
67 | * | command header | | ||
68 | * ------------------- <--- qcom_scm_get_command_buffer() | ||
69 | * | command buffer | | ||
70 | * ------------------- <--- struct qcom_scm_response and | ||
71 | * | response header | qcom_scm_command_to_response() | ||
72 | * ------------------- <--- qcom_scm_get_response_buffer() | ||
73 | * | response buffer | | ||
74 | * ------------------- | ||
75 | * | ||
76 | * There can be arbitrary padding between the headers and buffers so | ||
77 | * you should always use the appropriate qcom_scm_get_*_buffer() routines | ||
78 | * to access the buffers in a safe manner. | ||
79 | */ | ||
80 | struct qcom_scm_command { | ||
81 | __le32 len; | ||
82 | __le32 buf_offset; | ||
83 | __le32 resp_hdr_offset; | ||
84 | __le32 id; | ||
85 | __le32 buf[0]; | ||
86 | }; | ||
87 | |||
88 | /** | ||
89 | * struct qcom_scm_response - one SCM response buffer | ||
90 | * @len: total available memory for response | ||
91 | * @buf_offset: start of response data relative to start of qcom_scm_response | ||
92 | * @is_complete: indicates if the command has finished processing | ||
93 | */ | ||
94 | struct qcom_scm_response { | ||
95 | __le32 len; | ||
96 | __le32 buf_offset; | ||
97 | __le32 is_complete; | ||
98 | }; | ||
99 | |||
100 | /** | ||
101 | * alloc_qcom_scm_command() - Allocate an SCM command | ||
102 | * @cmd_size: size of the command buffer | ||
103 | * @resp_size: size of the response buffer | ||
104 | * | ||
105 | * Allocate an SCM command, including enough room for the command | ||
106 | * and response headers as well as the command and response buffers. | ||
107 | * | ||
108 | * Returns a valid &qcom_scm_command on success or %NULL if the allocation fails. | ||
109 | */ | ||
110 | static struct qcom_scm_command *alloc_qcom_scm_command(size_t cmd_size, size_t resp_size) | ||
111 | { | ||
112 | struct qcom_scm_command *cmd; | ||
113 | size_t len = sizeof(*cmd) + sizeof(struct qcom_scm_response) + cmd_size + | ||
114 | resp_size; | ||
115 | u32 offset; | ||
116 | |||
117 | cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL); | ||
118 | if (cmd) { | ||
119 | cmd->len = cpu_to_le32(len); | ||
120 | offset = offsetof(struct qcom_scm_command, buf); | ||
121 | cmd->buf_offset = cpu_to_le32(offset); | ||
122 | cmd->resp_hdr_offset = cpu_to_le32(offset + cmd_size); | ||
123 | } | ||
124 | return cmd; | ||
125 | } | ||
126 | |||
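The buffer layout described in the comment above is easiest to follow with concrete numbers: the command and response headers sit back-to-back with the two payloads, and the offsets stored in the command header are what the qcom_scm_get_*_buffer() helpers later add to the base pointer. A worked example of the offset arithmetic (payload sizes below are illustrative, assuming 4-byte fields and no padding):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct scm_command {            /* 4 x 32-bit header fields + payload */
        uint32_t len;
        uint32_t buf_offset;
        uint32_t resp_hdr_offset;
        uint32_t id;
        uint32_t buf[];
    };

    struct scm_response {           /* 3 x 32-bit fields */
        uint32_t len;
        uint32_t buf_offset;
        uint32_t is_complete;
    };

    int main(void)
    {
        size_t cmd_size = 8, resp_size = 4;     /* example payload sizes */
        size_t len = sizeof(struct scm_command) + sizeof(struct scm_response)
                     + cmd_size + resp_size;
        size_t buf_offset = offsetof(struct scm_command, buf);
        size_t resp_hdr_offset = buf_offset + cmd_size;

        /* total 16 + 12 + 8 + 4 = 40; command payload at 16; response header at 24 */
        printf("len=%zu buf_offset=%zu resp_hdr_offset=%zu\n",
               len, buf_offset, resp_hdr_offset);
        return 0;
    }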
127 | /** | ||
128 | * free_qcom_scm_command() - Free an SCM command | ||
129 | * @cmd: command to free | ||
130 | * | ||
131 | * Free an SCM command. | ||
132 | */ | ||
133 | static inline void free_qcom_scm_command(struct qcom_scm_command *cmd) | ||
134 | { | ||
135 | kfree(cmd); | ||
136 | } | ||
137 | |||
138 | /** | ||
139 | * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response | ||
140 | * @cmd: command | ||
141 | * | ||
142 | * Returns a pointer to a response for a command. | ||
143 | */ | ||
144 | static inline struct qcom_scm_response *qcom_scm_command_to_response( | ||
145 | const struct qcom_scm_command *cmd) | ||
146 | { | ||
147 | return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset); | ||
148 | } | ||
149 | |||
150 | /** | ||
151 | * qcom_scm_get_command_buffer() - Get a pointer to a command buffer | ||
152 | * @cmd: command | ||
153 | * | ||
154 | * Returns a pointer to the command buffer of a command. | ||
155 | */ | ||
156 | static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd) | ||
157 | { | ||
158 | return (void *)cmd->buf; | ||
159 | } | ||
160 | |||
161 | /** | ||
162 | * qcom_scm_get_response_buffer() - Get a pointer to a response buffer | ||
163 | * @rsp: response | ||
164 | * | ||
165 | * Returns a pointer to a response buffer of a response. | ||
166 | */ | ||
167 | static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp) | ||
168 | { | ||
169 | return (void *)rsp + le32_to_cpu(rsp->buf_offset); | ||
170 | } | ||
171 | |||
172 | static int qcom_scm_remap_error(int err) | ||
173 | { | ||
174 | pr_err("qcom_scm_call failed with error code %d\n", err); | ||
175 | switch (err) { | ||
176 | case QCOM_SCM_ERROR: | ||
177 | return -EIO; | ||
178 | case QCOM_SCM_EINVAL_ADDR: | ||
179 | case QCOM_SCM_EINVAL_ARG: | ||
180 | return -EINVAL; | ||
181 | case QCOM_SCM_EOPNOTSUPP: | ||
182 | return -EOPNOTSUPP; | ||
183 | case QCOM_SCM_ENOMEM: | ||
184 | return -ENOMEM; | ||
185 | } | ||
186 | return -EINVAL; | ||
187 | } | ||
188 | |||
189 | static u32 smc(u32 cmd_addr) | ||
190 | { | ||
191 | int context_id; | ||
192 | register u32 r0 asm("r0") = 1; | ||
193 | register u32 r1 asm("r1") = (u32)&context_id; | ||
194 | register u32 r2 asm("r2") = cmd_addr; | ||
195 | do { | ||
196 | asm volatile( | ||
197 | __asmeq("%0", "r0") | ||
198 | __asmeq("%1", "r0") | ||
199 | __asmeq("%2", "r1") | ||
200 | __asmeq("%3", "r2") | ||
201 | #ifdef REQUIRES_SEC | ||
202 | ".arch_extension sec\n" | ||
203 | #endif | ||
204 | "smc #0 @ switch to secure world\n" | ||
205 | : "=r" (r0) | ||
206 | : "r" (r0), "r" (r1), "r" (r2) | ||
207 | : "r3"); | ||
208 | } while (r0 == QCOM_SCM_INTERRUPTED); | ||
209 | |||
210 | return r0; | ||
211 | } | ||
212 | |||
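The smc() helper retries the trap whenever the secure world reports QCOM_SCM_INTERRUPTED. A minimal host-side sketch of that retry pattern, with a stand-in for the real SMC instruction (the fake return sequence is purely illustrative):

#include <stdio.h>

#define SCM_INTERRUPTED 1	/* mirrors QCOM_SCM_INTERRUPTED */

/* Stand-in for the "smc #0" trap; pretends to be interrupted twice. */
static int fake_secure_call(unsigned int cmd_addr)
{
	static int calls;

	(void)cmd_addr;
	return (++calls < 3) ? SCM_INTERRUPTED : 0;
}

int main(void)
{
	int ret;

	do {
		ret = fake_secure_call(0x80000000u);
	} while (ret == SCM_INTERRUPTED);

	printf("secure call completed with %d\n", ret);
	return 0;
}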
213 | static int __qcom_scm_call(const struct qcom_scm_command *cmd) | ||
214 | { | ||
215 | int ret; | ||
216 | u32 cmd_addr = virt_to_phys(cmd); | ||
217 | |||
218 | /* | ||
219 | * Flush the command buffer so that the secure world sees | ||
220 | * the correct data. | ||
221 | */ | ||
222 | __cpuc_flush_dcache_area((void *)cmd, cmd->len); | ||
223 | outer_flush_range(cmd_addr, cmd_addr + cmd->len); | ||
224 | |||
225 | ret = smc(cmd_addr); | ||
226 | if (ret < 0) | ||
227 | ret = qcom_scm_remap_error(ret); | ||
228 | |||
229 | return ret; | ||
230 | } | ||
231 | |||
232 | static void qcom_scm_inv_range(unsigned long start, unsigned long end) | ||
233 | { | ||
234 | u32 cacheline_size, ctr; | ||
235 | |||
236 | asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr)); | ||
237 | cacheline_size = 4 << ((ctr >> 16) & 0xf); | ||
238 | |||
239 | start = round_down(start, cacheline_size); | ||
240 | end = round_up(end, cacheline_size); | ||
241 | outer_inv_range(start, end); | ||
242 | while (start < end) { | ||
243 | asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start) | ||
244 | : "memory"); | ||
245 | start += cacheline_size; | ||
246 | } | ||
247 | dsb(); | ||
248 | isb(); | ||
249 | } | ||
250 | |||
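qcom_scm_inv_range() derives the line size from the cache type register field and widens the range to whole lines before invalidating. The same arithmetic in isolation, with a made-up CTR value and range:

#include <stdio.h>

int main(void)
{
	unsigned int ctr = 0x8444c004;			/* example CTR value */
	unsigned int line = 4 << ((ctr >> 16) & 0xf);	/* DminLine, in bytes */
	unsigned long start = 0x1003, end = 0x10f1;	/* arbitrary range */

	start &= ~((unsigned long)line - 1);			/* round_down() */
	end = (end + line - 1) & ~((unsigned long)line - 1);	/* round_up() */

	printf("cache line %u bytes, invalidate [0x%lx, 0x%lx)\n",
	       line, start, end);
	return 0;
}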
251 | /** | ||
252 | * qcom_scm_call() - Send an SCM command | ||
253 | * @svc_id: service identifier | ||
254 | * @cmd_id: command identifier | ||
255 | * @cmd_buf: command buffer | ||
256 | * @cmd_len: length of the command buffer | ||
257 | * @resp_buf: response buffer | ||
258 | * @resp_len: length of the response buffer | ||
259 | * | ||
260 | * Sends a command to the SCM and waits for the command to finish processing. | ||
261 | * | ||
262 | * A note on cache maintenance: | ||
263 | * Note that any buffers that are expected to be accessed by the secure world | ||
264 | * must be flushed before invoking qcom_scm_call and invalidated in the cache | ||
265 | * immediately after qcom_scm_call returns. Cache maintenance on the command | ||
266 | * and response buffers is taken care of by qcom_scm_call; however, callers are | ||
267 | * responsible for any other cached buffers passed over to the secure world. | ||
268 | */ | ||
269 | static int qcom_scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, | ||
270 | size_t cmd_len, void *resp_buf, size_t resp_len) | ||
271 | { | ||
272 | int ret; | ||
273 | struct qcom_scm_command *cmd; | ||
274 | struct qcom_scm_response *rsp; | ||
275 | unsigned long start, end; | ||
276 | |||
277 | cmd = alloc_qcom_scm_command(cmd_len, resp_len); | ||
278 | if (!cmd) | ||
279 | return -ENOMEM; | ||
280 | |||
281 | cmd->id = cpu_to_le32((svc_id << 10) | cmd_id); | ||
282 | if (cmd_buf) | ||
283 | memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len); | ||
284 | |||
285 | mutex_lock(&qcom_scm_lock); | ||
286 | ret = __qcom_scm_call(cmd); | ||
287 | mutex_unlock(&qcom_scm_lock); | ||
288 | if (ret) | ||
289 | goto out; | ||
290 | |||
291 | rsp = qcom_scm_command_to_response(cmd); | ||
292 | start = (unsigned long)rsp; | ||
293 | |||
294 | do { | ||
295 | qcom_scm_inv_range(start, start + sizeof(*rsp)); | ||
296 | } while (!rsp->is_complete); | ||
297 | |||
298 | end = (unsigned long)qcom_scm_get_response_buffer(rsp) + resp_len; | ||
299 | qcom_scm_inv_range(start, end); | ||
300 | |||
301 | if (resp_buf) | ||
302 | memcpy(resp_buf, qcom_scm_get_response_buffer(rsp), resp_len); | ||
303 | out: | ||
304 | free_qcom_scm_command(cmd); | ||
305 | return ret; | ||
306 | } | ||
307 | |||
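A hedged sketch of a qcom_scm_call() user inside this file: the service and command IDs and the payload meaning are hypothetical, but the pattern of a packed little-endian request with a small response mirrors __qcom_scm_is_call_available() further below.

/* Hypothetical example: query a firmware feature word (IDs are made up). */
static int example_query_feature(u32 feature)
{
	__le32 req = cpu_to_le32(feature);
	__le32 resp = 0;
	int ret;

	ret = qcom_scm_call(0x6 /* example svc */, 0x3 /* example cmd */,
			    &req, sizeof(req), &resp, sizeof(resp));
	if (ret)
		return ret;

	return le32_to_cpu(resp);
}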
308 | #define SCM_CLASS_REGISTER (0x2 << 8) | ||
309 | #define SCM_MASK_IRQS BIT(5) | ||
310 | #define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \ | ||
311 | SCM_CLASS_REGISTER | \ | ||
312 | SCM_MASK_IRQS | \ | ||
313 | (n & 0xf)) | ||
314 | |||
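The SCM_ATOMIC() macro packs the service, command, call class, IRQ-mask bit and argument count into a single register value. The same bit layout as a standalone sketch (the service/command numbers match the boot service and terminate-pc command used later, but any values work):

#include <stdio.h>

#define CLASS_REGISTER	(0x2 << 8)	/* mirrors SCM_CLASS_REGISTER */
#define MASK_IRQS	(1u << 5)	/* mirrors SCM_MASK_IRQS */

static unsigned int scm_atomic(unsigned int svc, unsigned int cmd,
			       unsigned int nargs)
{
	return ((((svc << 10) | (cmd & 0x3ff)) << 12) |
		CLASS_REGISTER | MASK_IRQS | (nargs & 0xf));
}

int main(void)
{
	/* e.g. service 0x1 (boot), command 0x2 (terminate-pc), one argument */
	printf("encoded call id: 0x%08x\n", scm_atomic(0x1, 0x2, 1));
	return 0;
}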
315 | /** | ||
316 | * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument | ||
317 | * @svc_id: service identifier | ||
318 | * @cmd_id: command identifier | ||
319 | * @arg1: first argument | ||
320 | * | ||
321 | * This shall only be used with commands that are guaranteed to be | ||
322 | * uninterruptible, atomic and SMP safe. | ||
323 | */ | ||
324 | static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1) | ||
325 | { | ||
326 | int context_id; | ||
327 | |||
328 | register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1); | ||
329 | register u32 r1 asm("r1") = (u32)&context_id; | ||
330 | register u32 r2 asm("r2") = arg1; | ||
331 | |||
332 | asm volatile( | ||
333 | __asmeq("%0", "r0") | ||
334 | __asmeq("%1", "r0") | ||
335 | __asmeq("%2", "r1") | ||
336 | __asmeq("%3", "r2") | ||
337 | #ifdef REQUIRES_SEC | ||
338 | ".arch_extension sec\n" | ||
339 | #endif | ||
340 | "smc #0 @ switch to secure world\n" | ||
341 | : "=r" (r0) | ||
342 | : "r" (r0), "r" (r1), "r" (r2) | ||
343 | : "r3"); | ||
344 | return r0; | ||
345 | } | ||
346 | |||
347 | u32 qcom_scm_get_version(void) | ||
348 | { | ||
349 | int context_id; | ||
350 | static u32 version = -1; | ||
351 | register u32 r0 asm("r0"); | ||
352 | register u32 r1 asm("r1"); | ||
353 | |||
354 | if (version != -1) | ||
355 | return version; | ||
356 | |||
357 | mutex_lock(&qcom_scm_lock); | ||
358 | |||
359 | r0 = 0x1 << 8; | ||
360 | r1 = (u32)&context_id; | ||
361 | do { | ||
362 | asm volatile( | ||
363 | __asmeq("%0", "r0") | ||
364 | __asmeq("%1", "r1") | ||
365 | __asmeq("%2", "r0") | ||
366 | __asmeq("%3", "r1") | ||
367 | #ifdef REQUIRES_SEC | ||
368 | ".arch_extension sec\n" | ||
369 | #endif | ||
370 | "smc #0 @ switch to secure world\n" | ||
371 | : "=r" (r0), "=r" (r1) | ||
372 | : "r" (r0), "r" (r1) | ||
373 | : "r2", "r3"); | ||
374 | } while (r0 == QCOM_SCM_INTERRUPTED); | ||
375 | |||
376 | version = r1; | ||
377 | mutex_unlock(&qcom_scm_lock); | ||
378 | |||
379 | return version; | ||
380 | } | ||
381 | EXPORT_SYMBOL(qcom_scm_get_version); | ||
382 | |||
383 | /* | ||
384 | * Set the cold/warm boot address for one of the CPU cores. | ||
385 | */ | ||
386 | static int qcom_scm_set_boot_addr(u32 addr, int flags) | ||
387 | { | ||
388 | struct { | ||
389 | __le32 flags; | ||
390 | __le32 addr; | ||
391 | } cmd; | ||
392 | |||
393 | cmd.addr = cpu_to_le32(addr); | ||
394 | cmd.flags = cpu_to_le32(flags); | ||
395 | return qcom_scm_call(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR, | ||
396 | &cmd, sizeof(cmd), NULL, 0); | ||
397 | } | ||
398 | |||
399 | /** | ||
400 | * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus | ||
401 | * @entry: Entry point function for the cpus | ||
402 | * @cpus: The cpumask of cpus that will use the entry point | ||
403 | * | ||
404 | * Set the cold boot address of the cpus. Any cpu outside the supported | ||
405 | * range will be removed from the cpu present mask. | ||
406 | */ | ||
407 | int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) | ||
408 | { | ||
409 | int flags = 0; | ||
410 | int cpu; | ||
411 | int scm_cb_flags[] = { | ||
412 | QCOM_SCM_FLAG_COLDBOOT_CPU0, | ||
413 | QCOM_SCM_FLAG_COLDBOOT_CPU1, | ||
414 | QCOM_SCM_FLAG_COLDBOOT_CPU2, | ||
415 | QCOM_SCM_FLAG_COLDBOOT_CPU3, | ||
416 | }; | ||
417 | |||
418 | if (!cpus || (cpus && cpumask_empty(cpus))) | ||
419 | return -EINVAL; | ||
420 | |||
421 | for_each_cpu(cpu, cpus) { | ||
422 | if (cpu < ARRAY_SIZE(scm_cb_flags)) | ||
423 | flags |= scm_cb_flags[cpu]; | ||
424 | else | ||
425 | set_cpu_present(cpu, false); | ||
426 | } | ||
427 | |||
428 | return qcom_scm_set_boot_addr(virt_to_phys(entry), flags); | ||
429 | } | ||
430 | |||
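A hedged usage sketch for the cold-boot path: platform SMP code points every present CPU at its secondary-startup trampoline through the public qcom_scm_set_cold_boot_addr() wrapper (see the qcom_scm.c hunk below). The entry symbol here is a stand-in, not a real kernel symbol.

/* Hypothetical SMP-prepare hook; the entry symbol is a placeholder. */
extern void example_secondary_startup(void);

static int example_smp_prepare(void)
{
	return qcom_scm_set_cold_boot_addr(example_secondary_startup,
					   cpu_present_mask);
}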
431 | /** | ||
432 | * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus | ||
433 | * @entry: Entry point function for the cpus | ||
434 | * @cpus: The cpumask of cpus that will use the entry point | ||
435 | * | ||
436 | * Set the Linux entry point for the SCM to transfer control to when coming | ||
437 | * out of a power down. CPU power down may be executed on cpuidle or hotplug. | ||
438 | */ | ||
439 | int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) | ||
440 | { | ||
441 | int ret; | ||
442 | int flags = 0; | ||
443 | int cpu; | ||
444 | |||
445 | /* | ||
446 | * Reassign only if we are switching from hotplug entry point | ||
447 | * to cpuidle entry point or vice versa. | ||
448 | */ | ||
449 | for_each_cpu(cpu, cpus) { | ||
450 | if (entry == qcom_scm_wb[cpu].entry) | ||
451 | continue; | ||
452 | flags |= qcom_scm_wb[cpu].flag; | ||
453 | } | ||
454 | |||
455 | /* No change in entry function */ | ||
456 | if (!flags) | ||
457 | return 0; | ||
458 | |||
459 | ret = qcom_scm_set_boot_addr(virt_to_phys(entry), flags); | ||
460 | if (!ret) { | ||
461 | for_each_cpu(cpu, cpus) | ||
462 | qcom_scm_wb[cpu].entry = entry; | ||
463 | } | ||
464 | |||
465 | return ret; | ||
466 | } | ||
467 | |||
468 | /** | ||
469 | * qcom_scm_cpu_power_down() - Power down the cpu | ||
470 | * @flags - Flags to flush cache | ||
471 | * | ||
472 | * This is the terminal call used to power down the CPU. If there is a pending | ||
473 | * interrupt, control returns from this function; otherwise, the CPU jumps to | ||
474 | * the warm boot entry point set for this CPU upon reset. | ||
475 | */ | ||
476 | void __qcom_scm_cpu_power_down(u32 flags) | ||
477 | { | ||
478 | qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC, | ||
479 | flags & QCOM_SCM_FLUSH_FLAG_MASK); | ||
480 | } | ||
481 | |||
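A hedged sketch of how the warm-boot and power-down entry points are typically combined on the CPU idle or hotplug path; the resume symbol and the zero flag value are placeholders, and real callers pass the cache-flush flag appropriate to their power state.

/* Hypothetical low-power entry; symbols and flag value are placeholders. */
extern void example_cpu_resume(void);

static void example_enter_power_collapse(void)
{
	/* Only re-programs the firmware if the entry actually changed. */
	qcom_scm_set_warm_boot_addr(example_cpu_resume,
				    cpumask_of(smp_processor_id()));

	/* 0 is a placeholder; the flag selects cache flushing behaviour. */
	qcom_scm_cpu_power_down(0);
}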
482 | int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id) | ||
483 | { | ||
484 | int ret; | ||
485 | u32 svc_cmd = (svc_id << 10) | cmd_id; | ||
486 | u32 ret_val = 0; | ||
487 | |||
488 | ret = qcom_scm_call(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, &svc_cmd, | ||
489 | sizeof(svc_cmd), &ret_val, sizeof(ret_val)); | ||
490 | if (ret) | ||
491 | return ret; | ||
492 | |||
493 | return ret_val; | ||
494 | } | ||
495 | |||
496 | int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) | ||
497 | { | ||
498 | if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT) | ||
499 | return -ERANGE; | ||
500 | |||
501 | return qcom_scm_call(QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, | ||
502 | req, req_cnt * sizeof(*req), resp, sizeof(*resp)); | ||
503 | } | ||
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 994b50fd997c..45c008d68891 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* Copyright (c) 2010, Code Aurora Forum. All rights reserved. | 1 | /* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved. |
2 | * Copyright (C) 2015 Linaro Ltd. | 2 | * Copyright (C) 2015 Linaro Ltd. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
@@ -16,393 +16,12 @@ | |||
16 | * 02110-1301, USA. | 16 | * 02110-1301, USA. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/slab.h> | 19 | #include <linux/cpumask.h> |
20 | #include <linux/io.h> | 20 | #include <linux/export.h> |
21 | #include <linux/module.h> | 21 | #include <linux/types.h> |
22 | #include <linux/mutex.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/err.h> | ||
25 | #include <linux/qcom_scm.h> | 22 | #include <linux/qcom_scm.h> |
26 | 23 | ||
27 | #include <asm/outercache.h> | 24 | #include "qcom_scm.h" |
28 | #include <asm/cacheflush.h> | ||
29 | |||
30 | |||
31 | #define QCOM_SCM_ENOMEM -5 | ||
32 | #define QCOM_SCM_EOPNOTSUPP -4 | ||
33 | #define QCOM_SCM_EINVAL_ADDR -3 | ||
34 | #define QCOM_SCM_EINVAL_ARG -2 | ||
35 | #define QCOM_SCM_ERROR -1 | ||
36 | #define QCOM_SCM_INTERRUPTED 1 | ||
37 | |||
38 | #define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00 | ||
39 | #define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01 | ||
40 | #define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08 | ||
41 | #define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20 | ||
42 | |||
43 | #define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04 | ||
44 | #define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02 | ||
45 | #define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10 | ||
46 | #define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40 | ||
47 | |||
48 | struct qcom_scm_entry { | ||
49 | int flag; | ||
50 | void *entry; | ||
51 | }; | ||
52 | |||
53 | static struct qcom_scm_entry qcom_scm_wb[] = { | ||
54 | { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 }, | ||
55 | { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 }, | ||
56 | { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 }, | ||
57 | { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 }, | ||
58 | }; | ||
59 | |||
60 | static DEFINE_MUTEX(qcom_scm_lock); | ||
61 | |||
62 | /** | ||
63 | * struct qcom_scm_command - one SCM command buffer | ||
64 | * @len: total available memory for command and response | ||
65 | * @buf_offset: start of command buffer | ||
66 | * @resp_hdr_offset: start of response buffer | ||
67 | * @id: command to be executed | ||
68 | * @buf: buffer returned from qcom_scm_get_command_buffer() | ||
69 | * | ||
70 | * An SCM command is laid out in memory as follows: | ||
71 | * | ||
72 | * ------------------- <--- struct qcom_scm_command | ||
73 | * | command header | | ||
74 | * ------------------- <--- qcom_scm_get_command_buffer() | ||
75 | * | command buffer | | ||
76 | * ------------------- <--- struct qcom_scm_response and | ||
77 | * | response header | qcom_scm_command_to_response() | ||
78 | * ------------------- <--- qcom_scm_get_response_buffer() | ||
79 | * | response buffer | | ||
80 | * ------------------- | ||
81 | * | ||
82 | * There can be arbitrary padding between the headers and buffers so | ||
83 | * you should always use the appropriate qcom_scm_get_*_buffer() routines | ||
84 | * to access the buffers in a safe manner. | ||
85 | */ | ||
86 | struct qcom_scm_command { | ||
87 | __le32 len; | ||
88 | __le32 buf_offset; | ||
89 | __le32 resp_hdr_offset; | ||
90 | __le32 id; | ||
91 | __le32 buf[0]; | ||
92 | }; | ||
93 | |||
94 | /** | ||
95 | * struct qcom_scm_response - one SCM response buffer | ||
96 | * @len: total available memory for response | ||
97 | * @buf_offset: start of response data relative to start of qcom_scm_response | ||
98 | * @is_complete: indicates if the command has finished processing | ||
99 | */ | ||
100 | struct qcom_scm_response { | ||
101 | __le32 len; | ||
102 | __le32 buf_offset; | ||
103 | __le32 is_complete; | ||
104 | }; | ||
105 | |||
106 | /** | ||
107 | * alloc_qcom_scm_command() - Allocate an SCM command | ||
108 | * @cmd_size: size of the command buffer | ||
109 | * @resp_size: size of the response buffer | ||
110 | * | ||
111 | * Allocate an SCM command, including enough room for the command | ||
112 | * and response headers as well as the command and response buffers. | ||
113 | * | ||
114 | * Returns a valid &qcom_scm_command on success or %NULL if the allocation fails. | ||
115 | */ | ||
116 | static struct qcom_scm_command *alloc_qcom_scm_command(size_t cmd_size, size_t resp_size) | ||
117 | { | ||
118 | struct qcom_scm_command *cmd; | ||
119 | size_t len = sizeof(*cmd) + sizeof(struct qcom_scm_response) + cmd_size + | ||
120 | resp_size; | ||
121 | u32 offset; | ||
122 | |||
123 | cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL); | ||
124 | if (cmd) { | ||
125 | cmd->len = cpu_to_le32(len); | ||
126 | offset = offsetof(struct qcom_scm_command, buf); | ||
127 | cmd->buf_offset = cpu_to_le32(offset); | ||
128 | cmd->resp_hdr_offset = cpu_to_le32(offset + cmd_size); | ||
129 | } | ||
130 | return cmd; | ||
131 | } | ||
132 | |||
133 | /** | ||
134 | * free_qcom_scm_command() - Free an SCM command | ||
135 | * @cmd: command to free | ||
136 | * | ||
137 | * Free an SCM command. | ||
138 | */ | ||
139 | static inline void free_qcom_scm_command(struct qcom_scm_command *cmd) | ||
140 | { | ||
141 | kfree(cmd); | ||
142 | } | ||
143 | |||
144 | /** | ||
145 | * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response | ||
146 | * @cmd: command | ||
147 | * | ||
148 | * Returns a pointer to a response for a command. | ||
149 | */ | ||
150 | static inline struct qcom_scm_response *qcom_scm_command_to_response( | ||
151 | const struct qcom_scm_command *cmd) | ||
152 | { | ||
153 | return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset); | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * qcom_scm_get_command_buffer() - Get a pointer to a command buffer | ||
158 | * @cmd: command | ||
159 | * | ||
160 | * Returns a pointer to the command buffer of a command. | ||
161 | */ | ||
162 | static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd) | ||
163 | { | ||
164 | return (void *)cmd->buf; | ||
165 | } | ||
166 | |||
167 | /** | ||
168 | * qcom_scm_get_response_buffer() - Get a pointer to a response buffer | ||
169 | * @rsp: response | ||
170 | * | ||
171 | * Returns a pointer to a response buffer of a response. | ||
172 | */ | ||
173 | static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp) | ||
174 | { | ||
175 | return (void *)rsp + le32_to_cpu(rsp->buf_offset); | ||
176 | } | ||
177 | |||
178 | static int qcom_scm_remap_error(int err) | ||
179 | { | ||
180 | pr_err("qcom_scm_call failed with error code %d\n", err); | ||
181 | switch (err) { | ||
182 | case QCOM_SCM_ERROR: | ||
183 | return -EIO; | ||
184 | case QCOM_SCM_EINVAL_ADDR: | ||
185 | case QCOM_SCM_EINVAL_ARG: | ||
186 | return -EINVAL; | ||
187 | case QCOM_SCM_EOPNOTSUPP: | ||
188 | return -EOPNOTSUPP; | ||
189 | case QCOM_SCM_ENOMEM: | ||
190 | return -ENOMEM; | ||
191 | } | ||
192 | return -EINVAL; | ||
193 | } | ||
194 | |||
195 | static u32 smc(u32 cmd_addr) | ||
196 | { | ||
197 | int context_id; | ||
198 | register u32 r0 asm("r0") = 1; | ||
199 | register u32 r1 asm("r1") = (u32)&context_id; | ||
200 | register u32 r2 asm("r2") = cmd_addr; | ||
201 | do { | ||
202 | asm volatile( | ||
203 | __asmeq("%0", "r0") | ||
204 | __asmeq("%1", "r0") | ||
205 | __asmeq("%2", "r1") | ||
206 | __asmeq("%3", "r2") | ||
207 | #ifdef REQUIRES_SEC | ||
208 | ".arch_extension sec\n" | ||
209 | #endif | ||
210 | "smc #0 @ switch to secure world\n" | ||
211 | : "=r" (r0) | ||
212 | : "r" (r0), "r" (r1), "r" (r2) | ||
213 | : "r3"); | ||
214 | } while (r0 == QCOM_SCM_INTERRUPTED); | ||
215 | |||
216 | return r0; | ||
217 | } | ||
218 | |||
219 | static int __qcom_scm_call(const struct qcom_scm_command *cmd) | ||
220 | { | ||
221 | int ret; | ||
222 | u32 cmd_addr = virt_to_phys(cmd); | ||
223 | |||
224 | /* | ||
225 | * Flush the command buffer so that the secure world sees | ||
226 | * the correct data. | ||
227 | */ | ||
228 | __cpuc_flush_dcache_area((void *)cmd, cmd->len); | ||
229 | outer_flush_range(cmd_addr, cmd_addr + cmd->len); | ||
230 | |||
231 | ret = smc(cmd_addr); | ||
232 | if (ret < 0) | ||
233 | ret = qcom_scm_remap_error(ret); | ||
234 | |||
235 | return ret; | ||
236 | } | ||
237 | |||
238 | static void qcom_scm_inv_range(unsigned long start, unsigned long end) | ||
239 | { | ||
240 | u32 cacheline_size, ctr; | ||
241 | |||
242 | asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr)); | ||
243 | cacheline_size = 4 << ((ctr >> 16) & 0xf); | ||
244 | |||
245 | start = round_down(start, cacheline_size); | ||
246 | end = round_up(end, cacheline_size); | ||
247 | outer_inv_range(start, end); | ||
248 | while (start < end) { | ||
249 | asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start) | ||
250 | : "memory"); | ||
251 | start += cacheline_size; | ||
252 | } | ||
253 | dsb(); | ||
254 | isb(); | ||
255 | } | ||
256 | |||
257 | /** | ||
258 | * qcom_scm_call() - Send an SCM command | ||
259 | * @svc_id: service identifier | ||
260 | * @cmd_id: command identifier | ||
261 | * @cmd_buf: command buffer | ||
262 | * @cmd_len: length of the command buffer | ||
263 | * @resp_buf: response buffer | ||
264 | * @resp_len: length of the response buffer | ||
265 | * | ||
266 | * Sends a command to the SCM and waits for the command to finish processing. | ||
267 | * | ||
268 | * A note on cache maintenance: | ||
269 | * Note that any buffers that are expected to be accessed by the secure world | ||
270 | * must be flushed before invoking qcom_scm_call and invalidated in the cache | ||
271 | * immediately after qcom_scm_call returns. Cache maintenance on the command | ||
272 | * and response buffers is taken care of by qcom_scm_call; however, callers are | ||
273 | * responsible for any other cached buffers passed over to the secure world. | ||
274 | */ | ||
275 | static int qcom_scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, | ||
276 | size_t cmd_len, void *resp_buf, size_t resp_len) | ||
277 | { | ||
278 | int ret; | ||
279 | struct qcom_scm_command *cmd; | ||
280 | struct qcom_scm_response *rsp; | ||
281 | unsigned long start, end; | ||
282 | |||
283 | cmd = alloc_qcom_scm_command(cmd_len, resp_len); | ||
284 | if (!cmd) | ||
285 | return -ENOMEM; | ||
286 | |||
287 | cmd->id = cpu_to_le32((svc_id << 10) | cmd_id); | ||
288 | if (cmd_buf) | ||
289 | memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len); | ||
290 | |||
291 | mutex_lock(&qcom_scm_lock); | ||
292 | ret = __qcom_scm_call(cmd); | ||
293 | mutex_unlock(&qcom_scm_lock); | ||
294 | if (ret) | ||
295 | goto out; | ||
296 | |||
297 | rsp = qcom_scm_command_to_response(cmd); | ||
298 | start = (unsigned long)rsp; | ||
299 | |||
300 | do { | ||
301 | qcom_scm_inv_range(start, start + sizeof(*rsp)); | ||
302 | } while (!rsp->is_complete); | ||
303 | |||
304 | end = (unsigned long)qcom_scm_get_response_buffer(rsp) + resp_len; | ||
305 | qcom_scm_inv_range(start, end); | ||
306 | |||
307 | if (resp_buf) | ||
308 | memcpy(resp_buf, qcom_scm_get_response_buffer(rsp), resp_len); | ||
309 | out: | ||
310 | free_qcom_scm_command(cmd); | ||
311 | return ret; | ||
312 | } | ||
313 | |||
314 | #define SCM_CLASS_REGISTER (0x2 << 8) | ||
315 | #define SCM_MASK_IRQS BIT(5) | ||
316 | #define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \ | ||
317 | SCM_CLASS_REGISTER | \ | ||
318 | SCM_MASK_IRQS | \ | ||
319 | (n & 0xf)) | ||
320 | |||
321 | /** | ||
322 | * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument | ||
323 | * @svc_id: service identifier | ||
324 | * @cmd_id: command identifier | ||
325 | * @arg1: first argument | ||
326 | * | ||
327 | * This shall only be used with commands that are guaranteed to be | ||
328 | * uninterruptible, atomic and SMP safe. | ||
329 | */ | ||
330 | static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1) | ||
331 | { | ||
332 | int context_id; | ||
333 | |||
334 | register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1); | ||
335 | register u32 r1 asm("r1") = (u32)&context_id; | ||
336 | register u32 r2 asm("r2") = arg1; | ||
337 | |||
338 | asm volatile( | ||
339 | __asmeq("%0", "r0") | ||
340 | __asmeq("%1", "r0") | ||
341 | __asmeq("%2", "r1") | ||
342 | __asmeq("%3", "r2") | ||
343 | #ifdef REQUIRES_SEC | ||
344 | ".arch_extension sec\n" | ||
345 | #endif | ||
346 | "smc #0 @ switch to secure world\n" | ||
347 | : "=r" (r0) | ||
348 | : "r" (r0), "r" (r1), "r" (r2) | ||
349 | : "r3"); | ||
350 | return r0; | ||
351 | } | ||
352 | |||
353 | u32 qcom_scm_get_version(void) | ||
354 | { | ||
355 | int context_id; | ||
356 | static u32 version = -1; | ||
357 | register u32 r0 asm("r0"); | ||
358 | register u32 r1 asm("r1"); | ||
359 | |||
360 | if (version != -1) | ||
361 | return version; | ||
362 | |||
363 | mutex_lock(&qcom_scm_lock); | ||
364 | |||
365 | r0 = 0x1 << 8; | ||
366 | r1 = (u32)&context_id; | ||
367 | do { | ||
368 | asm volatile( | ||
369 | __asmeq("%0", "r0") | ||
370 | __asmeq("%1", "r1") | ||
371 | __asmeq("%2", "r0") | ||
372 | __asmeq("%3", "r1") | ||
373 | #ifdef REQUIRES_SEC | ||
374 | ".arch_extension sec\n" | ||
375 | #endif | ||
376 | "smc #0 @ switch to secure world\n" | ||
377 | : "=r" (r0), "=r" (r1) | ||
378 | : "r" (r0), "r" (r1) | ||
379 | : "r2", "r3"); | ||
380 | } while (r0 == QCOM_SCM_INTERRUPTED); | ||
381 | |||
382 | version = r1; | ||
383 | mutex_unlock(&qcom_scm_lock); | ||
384 | |||
385 | return version; | ||
386 | } | ||
387 | EXPORT_SYMBOL(qcom_scm_get_version); | ||
388 | |||
389 | #define QCOM_SCM_SVC_BOOT 0x1 | ||
390 | #define QCOM_SCM_BOOT_ADDR 0x1 | ||
391 | /* | ||
392 | * Set the cold/warm boot address for one of the CPU cores. | ||
393 | */ | ||
394 | static int qcom_scm_set_boot_addr(u32 addr, int flags) | ||
395 | { | ||
396 | struct { | ||
397 | __le32 flags; | ||
398 | __le32 addr; | ||
399 | } cmd; | ||
400 | |||
401 | cmd.addr = cpu_to_le32(addr); | ||
402 | cmd.flags = cpu_to_le32(flags); | ||
403 | return qcom_scm_call(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR, | ||
404 | &cmd, sizeof(cmd), NULL, 0); | ||
405 | } | ||
406 | 25 | ||
407 | /** | 26 | /** |
408 | * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus | 27 | * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus |
@@ -414,26 +33,7 @@ static int qcom_scm_set_boot_addr(u32 addr, int flags) | |||
414 | */ | 33 | */ |
415 | int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) | 34 | int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) |
416 | { | 35 | { |
417 | int flags = 0; | 36 | return __qcom_scm_set_cold_boot_addr(entry, cpus); |
418 | int cpu; | ||
419 | int scm_cb_flags[] = { | ||
420 | QCOM_SCM_FLAG_COLDBOOT_CPU0, | ||
421 | QCOM_SCM_FLAG_COLDBOOT_CPU1, | ||
422 | QCOM_SCM_FLAG_COLDBOOT_CPU2, | ||
423 | QCOM_SCM_FLAG_COLDBOOT_CPU3, | ||
424 | }; | ||
425 | |||
426 | if (!cpus || (cpus && cpumask_empty(cpus))) | ||
427 | return -EINVAL; | ||
428 | |||
429 | for_each_cpu(cpu, cpus) { | ||
430 | if (cpu < ARRAY_SIZE(scm_cb_flags)) | ||
431 | flags |= scm_cb_flags[cpu]; | ||
432 | else | ||
433 | set_cpu_present(cpu, false); | ||
434 | } | ||
435 | |||
436 | return qcom_scm_set_boot_addr(virt_to_phys(entry), flags); | ||
437 | } | 37 | } |
438 | EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr); | 38 | EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr); |
439 | 39 | ||
@@ -447,37 +47,10 @@ EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr); | |||
447 | */ | 47 | */ |
448 | int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) | 48 | int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) |
449 | { | 49 | { |
450 | int ret; | 50 | return __qcom_scm_set_warm_boot_addr(entry, cpus); |
451 | int flags = 0; | ||
452 | int cpu; | ||
453 | |||
454 | /* | ||
455 | * Reassign only if we are switching from hotplug entry point | ||
456 | * to cpuidle entry point or vice versa. | ||
457 | */ | ||
458 | for_each_cpu(cpu, cpus) { | ||
459 | if (entry == qcom_scm_wb[cpu].entry) | ||
460 | continue; | ||
461 | flags |= qcom_scm_wb[cpu].flag; | ||
462 | } | ||
463 | |||
464 | /* No change in entry function */ | ||
465 | if (!flags) | ||
466 | return 0; | ||
467 | |||
468 | ret = qcom_scm_set_boot_addr(virt_to_phys(entry), flags); | ||
469 | if (!ret) { | ||
470 | for_each_cpu(cpu, cpus) | ||
471 | qcom_scm_wb[cpu].entry = entry; | ||
472 | } | ||
473 | |||
474 | return ret; | ||
475 | } | 51 | } |
476 | EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr); | 52 | EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr); |
477 | 53 | ||
478 | #define QCOM_SCM_CMD_TERMINATE_PC 0x2 | ||
479 | #define QCOM_SCM_FLUSH_FLAG_MASK 0x3 | ||
480 | |||
481 | /** | 54 | /** |
482 | * qcom_scm_cpu_power_down() - Power down the cpu | 55 | * qcom_scm_cpu_power_down() - Power down the cpu |
483 | * @flags - Flags to flush cache | 56 | * @flags - Flags to flush cache |
@@ -488,7 +61,36 @@ EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr); | |||
488 | */ | 61 | */ |
489 | void qcom_scm_cpu_power_down(u32 flags) | 62 | void qcom_scm_cpu_power_down(u32 flags) |
490 | { | 63 | { |
491 | qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC, | 64 | __qcom_scm_cpu_power_down(flags); |
492 | flags & QCOM_SCM_FLUSH_FLAG_MASK); | ||
493 | } | 65 | } |
494 | EXPORT_SYMBOL(qcom_scm_cpu_power_down); | 66 | EXPORT_SYMBOL(qcom_scm_cpu_power_down); |
67 | |||
68 | /** | ||
69 | * qcom_scm_hdcp_available() - Check if secure environment supports HDCP. | ||
70 | * | ||
71 | * Return true if HDCP is supported, false if not. | ||
72 | */ | ||
73 | bool qcom_scm_hdcp_available(void) | ||
74 | { | ||
75 | int ret; | ||
76 | |||
77 | ret = __qcom_scm_is_call_available(QCOM_SCM_SVC_HDCP, | ||
78 | QCOM_SCM_CMD_HDCP); | ||
79 | |||
80 | return (ret > 0) ? true : false; | ||
81 | } | ||
82 | EXPORT_SYMBOL(qcom_scm_hdcp_available); | ||
83 | |||
84 | /** | ||
85 | * qcom_scm_hdcp_req() - Send HDCP request. | ||
86 | * @req: HDCP request array | ||
87 | * @req_cnt: HDCP request array count | ||
88 | * @resp: response buffer passed to SCM | ||
89 | * | ||
90 | * Write HDCP register(s) through SCM. | ||
91 | */ | ||
92 | int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) | ||
93 | { | ||
94 | return __qcom_scm_hdcp_req(req, req_cnt, resp); | ||
95 | } | ||
96 | EXPORT_SYMBOL(qcom_scm_hdcp_req); | ||
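A hedged sketch of a consumer of the two HDCP entry points: the register addresses and values are placeholders, and the addr/val field names of struct qcom_scm_hdcp_req are assumptions taken from the public qcom_scm.h header rather than from this hunk.

static int example_write_hdcp_regs(void)
{
	struct qcom_scm_hdcp_req req[2] = {
		{ .addr = 0x1000, .val = 0x1 },	/* placeholder register/value */
		{ .addr = 0x1004, .val = 0x0 },
	};
	u32 resp = 0;
	int ret;

	if (!qcom_scm_hdcp_available())
		return -ENODEV;

	ret = qcom_scm_hdcp_req(req, ARRAY_SIZE(req), &resp);
	if (ret)
		return ret;

	/* resp is a firmware-defined status word; treated as opaque here. */
	pr_debug("hdcp scm response: %#x\n", resp);
	return 0;
}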
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h new file mode 100644 index 000000000000..2cce75c08b99 --- /dev/null +++ b/drivers/firmware/qcom_scm.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License version 2 and | ||
5 | * only version 2 as published by the Free Software Foundation. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | */ | ||
12 | #ifndef __QCOM_SCM_INT_H | ||
13 | #define __QCOM_SCM_INT_H | ||
14 | |||
15 | #define QCOM_SCM_SVC_BOOT 0x1 | ||
16 | #define QCOM_SCM_BOOT_ADDR 0x1 | ||
17 | #define QCOM_SCM_BOOT_ADDR_MC 0x11 | ||
18 | |||
19 | #define QCOM_SCM_FLAG_HLOS 0x01 | ||
20 | #define QCOM_SCM_FLAG_COLDBOOT_MC 0x02 | ||
21 | #define QCOM_SCM_FLAG_WARMBOOT_MC 0x04 | ||
22 | extern int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); | ||
23 | extern int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); | ||
24 | |||
25 | #define QCOM_SCM_CMD_TERMINATE_PC 0x2 | ||
26 | #define QCOM_SCM_FLUSH_FLAG_MASK 0x3 | ||
27 | #define QCOM_SCM_CMD_CORE_HOTPLUGGED 0x10 | ||
28 | extern void __qcom_scm_cpu_power_down(u32 flags); | ||
29 | |||
30 | #define QCOM_SCM_SVC_INFO 0x6 | ||
31 | #define QCOM_IS_CALL_AVAIL_CMD 0x1 | ||
32 | extern int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id); | ||
33 | |||
34 | #define QCOM_SCM_SVC_HDCP 0x11 | ||
35 | #define QCOM_SCM_CMD_HDCP 0x01 | ||
36 | extern int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, | ||
37 | u32 *resp); | ||
38 | |||
39 | /* common error codes */ | ||
40 | #define QCOM_SCM_ENOMEM -5 | ||
41 | #define QCOM_SCM_EOPNOTSUPP -4 | ||
42 | #define QCOM_SCM_EINVAL_ADDR -3 | ||
43 | #define QCOM_SCM_EINVAL_ARG -2 | ||
44 | #define QCOM_SCM_ERROR -1 | ||
45 | #define QCOM_SCM_INTERRUPTED 1 | ||
46 | |||
47 | #endif | ||
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 1ae4e547b419..73f918d066c6 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -219,7 +219,7 @@ config TEGRA_IOMMU_SMMU | |||
219 | select IOMMU_API | 219 | select IOMMU_API |
220 | help | 220 | help |
221 | This driver supports the IOMMU hardware (SMMU) found on NVIDIA Tegra | 221 | This driver supports the IOMMU hardware (SMMU) found on NVIDIA Tegra |
222 | SoCs (Tegra30 up to Tegra124). | 222 | SoCs (Tegra30 up to Tegra132). |
223 | 223 | ||
224 | config EXYNOS_IOMMU | 224 | config EXYNOS_IOMMU |
225 | bool "Exynos IOMMU Support" | 225 | bool "Exynos IOMMU Support" |
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index c845d99ecf6b..c1f2e521dc52 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c | |||
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/bitops.h> | 9 | #include <linux/bitops.h> |
10 | #include <linux/debugfs.h> | ||
10 | #include <linux/err.h> | 11 | #include <linux/err.h> |
11 | #include <linux/iommu.h> | 12 | #include <linux/iommu.h> |
12 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
@@ -31,6 +32,8 @@ struct tegra_smmu { | |||
31 | struct mutex lock; | 32 | struct mutex lock; |
32 | 33 | ||
33 | struct list_head list; | 34 | struct list_head list; |
35 | |||
36 | struct dentry *debugfs; | ||
34 | }; | 37 | }; |
35 | 38 | ||
36 | struct tegra_smmu_as { | 39 | struct tegra_smmu_as { |
@@ -673,6 +676,103 @@ static void tegra_smmu_ahb_enable(void) | |||
673 | } | 676 | } |
674 | } | 677 | } |
675 | 678 | ||
679 | static int tegra_smmu_swgroups_show(struct seq_file *s, void *data) | ||
680 | { | ||
681 | struct tegra_smmu *smmu = s->private; | ||
682 | unsigned int i; | ||
683 | u32 value; | ||
684 | |||
685 | seq_printf(s, "swgroup enabled ASID\n"); | ||
686 | seq_printf(s, "------------------------\n"); | ||
687 | |||
688 | for (i = 0; i < smmu->soc->num_swgroups; i++) { | ||
689 | const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i]; | ||
690 | const char *status; | ||
691 | unsigned int asid; | ||
692 | |||
693 | value = smmu_readl(smmu, group->reg); | ||
694 | |||
695 | if (value & SMMU_ASID_ENABLE) | ||
696 | status = "yes"; | ||
697 | else | ||
698 | status = "no"; | ||
699 | |||
700 | asid = value & SMMU_ASID_MASK; | ||
701 | |||
702 | seq_printf(s, "%-9s %-7s %#04x\n", group->name, status, | ||
703 | asid); | ||
704 | } | ||
705 | |||
706 | return 0; | ||
707 | } | ||
708 | |||
709 | static int tegra_smmu_swgroups_open(struct inode *inode, struct file *file) | ||
710 | { | ||
711 | return single_open(file, tegra_smmu_swgroups_show, inode->i_private); | ||
712 | } | ||
713 | |||
714 | static const struct file_operations tegra_smmu_swgroups_fops = { | ||
715 | .open = tegra_smmu_swgroups_open, | ||
716 | .read = seq_read, | ||
717 | .llseek = seq_lseek, | ||
718 | .release = single_release, | ||
719 | }; | ||
720 | |||
721 | static int tegra_smmu_clients_show(struct seq_file *s, void *data) | ||
722 | { | ||
723 | struct tegra_smmu *smmu = s->private; | ||
724 | unsigned int i; | ||
725 | u32 value; | ||
726 | |||
727 | seq_printf(s, "client enabled\n"); | ||
728 | seq_printf(s, "--------------------\n"); | ||
729 | |||
730 | for (i = 0; i < smmu->soc->num_clients; i++) { | ||
731 | const struct tegra_mc_client *client = &smmu->soc->clients[i]; | ||
732 | const char *status; | ||
733 | |||
734 | value = smmu_readl(smmu, client->smmu.reg); | ||
735 | |||
736 | if (value & BIT(client->smmu.bit)) | ||
737 | status = "yes"; | ||
738 | else | ||
739 | status = "no"; | ||
740 | |||
741 | seq_printf(s, "%-12s %s\n", client->name, status); | ||
742 | } | ||
743 | |||
744 | return 0; | ||
745 | } | ||
746 | |||
747 | static int tegra_smmu_clients_open(struct inode *inode, struct file *file) | ||
748 | { | ||
749 | return single_open(file, tegra_smmu_clients_show, inode->i_private); | ||
750 | } | ||
751 | |||
752 | static const struct file_operations tegra_smmu_clients_fops = { | ||
753 | .open = tegra_smmu_clients_open, | ||
754 | .read = seq_read, | ||
755 | .llseek = seq_lseek, | ||
756 | .release = single_release, | ||
757 | }; | ||
758 | |||
759 | static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu) | ||
760 | { | ||
761 | smmu->debugfs = debugfs_create_dir("smmu", NULL); | ||
762 | if (!smmu->debugfs) | ||
763 | return; | ||
764 | |||
765 | debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu, | ||
766 | &tegra_smmu_swgroups_fops); | ||
767 | debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu, | ||
768 | &tegra_smmu_clients_fops); | ||
769 | } | ||
770 | |||
771 | static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu) | ||
772 | { | ||
773 | debugfs_remove_recursive(smmu->debugfs); | ||
774 | } | ||
775 | |||
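The two debugfs files above follow the usual single_open()/seq_file pattern. A minimal, self-contained sketch of that pattern as a standalone module (the directory, file and table contents are invented):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static struct dentry *example_dir;

static int example_show(struct seq_file *s, void *data)
{
	/* Emit a small fixed table, like the swgroups/clients files do. */
	seq_printf(s, "name     enabled\n");
	seq_printf(s, "----------------\n");
	seq_printf(s, "%-8s %s\n", "demo", "yes");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
	.open = example_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init example_init(void)
{
	example_dir = debugfs_create_dir("example-debug", NULL);
	if (!example_dir)
		return -ENOMEM;

	debugfs_create_file("status", S_IRUGO, example_dir, NULL,
			    &example_fops);
	return 0;
}

static void __exit example_exit(void)
{
	debugfs_remove_recursive(example_dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");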
676 | struct tegra_smmu *tegra_smmu_probe(struct device *dev, | 776 | struct tegra_smmu *tegra_smmu_probe(struct device *dev, |
677 | const struct tegra_smmu_soc *soc, | 777 | const struct tegra_smmu_soc *soc, |
678 | struct tegra_mc *mc) | 778 | struct tegra_mc *mc) |
@@ -743,5 +843,14 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev, | |||
743 | if (err < 0) | 843 | if (err < 0) |
744 | return ERR_PTR(err); | 844 | return ERR_PTR(err); |
745 | 845 | ||
846 | if (IS_ENABLED(CONFIG_DEBUG_FS)) | ||
847 | tegra_smmu_debugfs_init(smmu); | ||
848 | |||
746 | return smmu; | 849 | return smmu; |
747 | } | 850 | } |
851 | |||
852 | void tegra_smmu_remove(struct tegra_smmu *smmu) | ||
853 | { | ||
854 | if (IS_ENABLED(CONFIG_DEBUG_FS)) | ||
855 | tegra_smmu_debugfs_exit(smmu); | ||
856 | } | ||
diff --git a/drivers/leds/leds-syscon.c b/drivers/leds/leds-syscon.c index 6896e2d9ba58..d1660b039812 100644 --- a/drivers/leds/leds-syscon.c +++ b/drivers/leds/leds-syscon.c | |||
@@ -20,6 +20,7 @@ | |||
20 | * MA 02111-1307 USA | 20 | * MA 02111-1307 USA |
21 | */ | 21 | */ |
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/module.h> | ||
23 | #include <linux/of_device.h> | 24 | #include <linux/of_device.h> |
24 | #include <linux/of_address.h> | 25 | #include <linux/of_address.h> |
25 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
@@ -66,102 +67,101 @@ static void syscon_led_set(struct led_classdev *led_cdev, | |||
66 | dev_err(sled->cdev.dev, "error updating LED status\n"); | 67 | dev_err(sled->cdev.dev, "error updating LED status\n"); |
67 | } | 68 | } |
68 | 69 | ||
69 | static int __init syscon_leds_spawn(struct device_node *np, | 70 | static int syscon_led_probe(struct platform_device *pdev) |
70 | struct device *dev, | ||
71 | struct regmap *map) | ||
72 | { | 71 | { |
73 | struct device_node *child; | 72 | struct device *dev = &pdev->dev; |
73 | struct device_node *np = dev->of_node; | ||
74 | struct device *parent; | ||
75 | struct regmap *map; | ||
76 | struct syscon_led *sled; | ||
77 | const char *state; | ||
74 | int ret; | 78 | int ret; |
75 | 79 | ||
76 | for_each_available_child_of_node(np, child) { | 80 | parent = dev->parent; |
77 | struct syscon_led *sled; | 81 | if (!parent) { |
78 | const char *state; | 82 | dev_err(dev, "no parent for syscon LED\n"); |
79 | 83 | return -ENODEV; | |
80 | /* Only check for register-bit-leds */ | 84 | } |
81 | if (of_property_match_string(child, "compatible", | 85 | map = syscon_node_to_regmap(parent->of_node); |
82 | "register-bit-led") < 0) | 86 | if (!map) { |
83 | continue; | 87 | dev_err(dev, "no regmap for syscon LED parent\n"); |
84 | 88 | return -ENODEV; | |
85 | sled = devm_kzalloc(dev, sizeof(*sled), GFP_KERNEL); | 89 | } |
86 | if (!sled) | 90 | |
87 | return -ENOMEM; | 91 | sled = devm_kzalloc(dev, sizeof(*sled), GFP_KERNEL); |
88 | 92 | if (!sled) | |
89 | sled->map = map; | 93 | return -ENOMEM; |
90 | 94 | ||
91 | if (of_property_read_u32(child, "offset", &sled->offset)) | 95 | sled->map = map; |
92 | return -EINVAL; | 96 | |
93 | if (of_property_read_u32(child, "mask", &sled->mask)) | 97 | if (of_property_read_u32(np, "offset", &sled->offset)) |
94 | return -EINVAL; | 98 | return -EINVAL; |
95 | sled->cdev.name = | 99 | if (of_property_read_u32(np, "mask", &sled->mask)) |
96 | of_get_property(child, "label", NULL) ? : child->name; | 100 | return -EINVAL; |
97 | sled->cdev.default_trigger = | 101 | sled->cdev.name = |
98 | of_get_property(child, "linux,default-trigger", NULL); | 102 | of_get_property(np, "label", NULL) ? : np->name; |
99 | 103 | sled->cdev.default_trigger = | |
100 | state = of_get_property(child, "default-state", NULL); | 104 | of_get_property(np, "linux,default-trigger", NULL); |
101 | if (state) { | 105 | |
102 | if (!strcmp(state, "keep")) { | 106 | state = of_get_property(np, "default-state", NULL); |
103 | u32 val; | 107 | if (state) { |
104 | 108 | if (!strcmp(state, "keep")) { | |
105 | ret = regmap_read(map, sled->offset, &val); | 109 | u32 val; |
106 | if (ret < 0) | 110 | |
107 | return ret; | 111 | ret = regmap_read(map, sled->offset, &val); |
108 | sled->state = !!(val & sled->mask); | 112 | if (ret < 0) |
109 | } else if (!strcmp(state, "on")) { | 113 | return ret; |
110 | sled->state = true; | 114 | sled->state = !!(val & sled->mask); |
111 | ret = regmap_update_bits(map, sled->offset, | 115 | } else if (!strcmp(state, "on")) { |
112 | sled->mask, | 116 | sled->state = true; |
113 | sled->mask); | 117 | ret = regmap_update_bits(map, sled->offset, |
114 | if (ret < 0) | 118 | sled->mask, |
115 | return ret; | 119 | sled->mask); |
116 | } else { | 120 | if (ret < 0) |
117 | sled->state = false; | 121 | return ret; |
118 | ret = regmap_update_bits(map, sled->offset, | 122 | } else { |
119 | sled->mask, 0); | 123 | sled->state = false; |
120 | if (ret < 0) | 124 | ret = regmap_update_bits(map, sled->offset, |
121 | return ret; | 125 | sled->mask, 0); |
122 | } | 126 | if (ret < 0) |
127 | return ret; | ||
123 | } | 128 | } |
124 | sled->cdev.brightness_set = syscon_led_set; | 129 | } |
130 | sled->cdev.brightness_set = syscon_led_set; | ||
125 | 131 | ||
126 | ret = led_classdev_register(dev, &sled->cdev); | 132 | ret = led_classdev_register(dev, &sled->cdev); |
127 | if (ret < 0) | 133 | if (ret < 0) |
128 | return ret; | 134 | return ret; |
135 | |||
136 | platform_set_drvdata(pdev, sled); | ||
137 | dev_info(dev, "registered LED %s\n", sled->cdev.name); | ||
129 | 138 | ||
130 | dev_info(dev, "registered LED %s\n", sled->cdev.name); | ||
131 | } | ||
132 | return 0; | 139 | return 0; |
133 | } | 140 | } |
134 | 141 | ||
135 | static int __init syscon_leds_init(void) | 142 | static int syscon_led_remove(struct platform_device *pdev) |
136 | { | 143 | { |
137 | struct device_node *np; | 144 | struct syscon_led *sled = platform_get_drvdata(pdev); |
138 | |||
139 | for_each_of_allnodes(np) { | ||
140 | struct platform_device *pdev; | ||
141 | struct regmap *map; | ||
142 | int ret; | ||
143 | 145 | ||
144 | if (!of_device_is_compatible(np, "syscon")) | 146 | led_classdev_unregister(&sled->cdev); |
145 | continue; | 147 | /* Turn it off */ |
148 | regmap_update_bits(sled->map, sled->offset, sled->mask, 0); | ||
149 | return 0; | ||
150 | } | ||
146 | 151 | ||
147 | map = syscon_node_to_regmap(np); | 152 | static const struct of_device_id of_syscon_leds_match[] = { |
148 | if (IS_ERR(map)) { | 153 | { .compatible = "register-bit-led", }, |
149 | pr_err("error getting regmap for syscon LEDs\n"); | 154 | {}, |
150 | continue; | 155 | }; |
151 | } | ||
152 | 156 | ||
153 | /* | 157 | MODULE_DEVICE_TABLE(of, of_syscon_leds_match); |
154 | * If the map is there, the device should be there, we allocate | ||
155 | * memory on the syscon device's behalf here. | ||
156 | */ | ||
157 | pdev = of_find_device_by_node(np); | ||
158 | if (!pdev) | ||
159 | return -ENODEV; | ||
160 | ret = syscon_leds_spawn(np, &pdev->dev, map); | ||
161 | if (ret) | ||
162 | dev_err(&pdev->dev, "could not spawn syscon LEDs\n"); | ||
163 | } | ||
164 | 158 | ||
165 | return 0; | 159 | static struct platform_driver syscon_led_driver = { |
166 | } | 160 | .probe = syscon_led_probe, |
167 | device_initcall(syscon_leds_init); | 161 | .remove = syscon_led_remove, |
162 | .driver = { | ||
163 | .name = "leds-syscon", | ||
164 | .of_match_table = of_syscon_leds_match, | ||
165 | }, | ||
166 | }; | ||
167 | module_platform_driver(syscon_led_driver); | ||
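For context, a hedged sketch of what the brightness_set callback registered in the probe above boils down to; the struct fields follow the driver (map, offset, mask, state, cdev), but this is a simplified illustration rather than the driver's exact syscon_led_set() body.

/* Simplified register-bit LED set operation (illustrative). */
static void example_bit_led_set(struct led_classdev *cdev,
				enum led_brightness value)
{
	struct syscon_led *sled = container_of(cdev, struct syscon_led, cdev);
	u32 val = value ? sled->mask : 0;
	int ret;

	ret = regmap_update_bits(sled->map, sled->offset, sled->mask, val);
	if (ret < 0)
		dev_err(sled->cdev.dev, "error updating LED status\n");
	else
		sled->state = !!value;
}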
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig index 571087621827..6d74e499e18d 100644 --- a/drivers/memory/tegra/Kconfig +++ b/drivers/memory/tegra/Kconfig | |||
@@ -5,3 +5,13 @@ config TEGRA_MC | |||
5 | help | 5 | help |
6 | This driver supports the Memory Controller (MC) hardware found on | 6 | This driver supports the Memory Controller (MC) hardware found on |
7 | NVIDIA Tegra SoCs. | 7 | NVIDIA Tegra SoCs. |
8 | |||
9 | config TEGRA124_EMC | ||
10 | bool "NVIDIA Tegra124 External Memory Controller driver" | ||
11 | default y | ||
12 | depends on TEGRA_MC && ARCH_TEGRA_124_SOC | ||
13 | help | ||
14 | This driver is for the External Memory Controller (EMC) found on | ||
15 | Tegra124 chips. The EMC controls the external DRAM on the board. | ||
16 | This driver is required to change memory timings / clock rate for | ||
17 | external memory. | ||
diff --git a/drivers/memory/tegra/Makefile b/drivers/memory/tegra/Makefile index 0d9f497b786c..6a0b9ac54f05 100644 --- a/drivers/memory/tegra/Makefile +++ b/drivers/memory/tegra/Makefile | |||
@@ -3,5 +3,8 @@ tegra-mc-y := mc.o | |||
3 | tegra-mc-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra30.o | 3 | tegra-mc-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra30.o |
4 | tegra-mc-$(CONFIG_ARCH_TEGRA_114_SOC) += tegra114.o | 4 | tegra-mc-$(CONFIG_ARCH_TEGRA_114_SOC) += tegra114.o |
5 | tegra-mc-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124.o | 5 | tegra-mc-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124.o |
6 | tegra-mc-$(CONFIG_ARCH_TEGRA_132_SOC) += tegra124.o | ||
6 | 7 | ||
7 | obj-$(CONFIG_TEGRA_MC) += tegra-mc.o | 8 | obj-$(CONFIG_TEGRA_MC) += tegra-mc.o |
9 | |||
10 | obj-$(CONFIG_TEGRA124_EMC) += tegra124-emc.o | ||
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c index fe3c44e7e1d1..c71ede67e6c8 100644 --- a/drivers/memory/tegra/mc.c +++ b/drivers/memory/tegra/mc.c | |||
@@ -13,6 +13,9 @@ | |||
13 | #include <linux/of.h> | 13 | #include <linux/of.h> |
14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/sort.h> | ||
17 | |||
18 | #include <soc/tegra/fuse.h> | ||
16 | 19 | ||
17 | #include "mc.h" | 20 | #include "mc.h" |
18 | 21 | ||
@@ -48,6 +51,9 @@ | |||
48 | #define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK 0x1ff | 51 | #define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK 0x1ff |
49 | #define MC_EMEM_ARB_MISC0 0xd8 | 52 | #define MC_EMEM_ARB_MISC0 0xd8 |
50 | 53 | ||
54 | #define MC_EMEM_ADR_CFG 0x54 | ||
55 | #define MC_EMEM_ADR_CFG_EMEM_NUMDEV BIT(0) | ||
56 | |||
51 | static const struct of_device_id tegra_mc_of_match[] = { | 57 | static const struct of_device_id tegra_mc_of_match[] = { |
52 | #ifdef CONFIG_ARCH_TEGRA_3x_SOC | 58 | #ifdef CONFIG_ARCH_TEGRA_3x_SOC |
53 | { .compatible = "nvidia,tegra30-mc", .data = &tegra30_mc_soc }, | 59 | { .compatible = "nvidia,tegra30-mc", .data = &tegra30_mc_soc }, |
@@ -58,6 +64,9 @@ static const struct of_device_id tegra_mc_of_match[] = { | |||
58 | #ifdef CONFIG_ARCH_TEGRA_124_SOC | 64 | #ifdef CONFIG_ARCH_TEGRA_124_SOC |
59 | { .compatible = "nvidia,tegra124-mc", .data = &tegra124_mc_soc }, | 65 | { .compatible = "nvidia,tegra124-mc", .data = &tegra124_mc_soc }, |
60 | #endif | 66 | #endif |
67 | #ifdef CONFIG_ARCH_TEGRA_132_SOC | ||
68 | { .compatible = "nvidia,tegra132-mc", .data = &tegra132_mc_soc }, | ||
69 | #endif | ||
61 | { } | 70 | { } |
62 | }; | 71 | }; |
63 | MODULE_DEVICE_TABLE(of, tegra_mc_of_match); | 72 | MODULE_DEVICE_TABLE(of, tegra_mc_of_match); |
@@ -91,6 +100,130 @@ static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc) | |||
91 | return 0; | 100 | return 0; |
92 | } | 101 | } |
93 | 102 | ||
103 | void tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate) | ||
104 | { | ||
105 | unsigned int i; | ||
106 | struct tegra_mc_timing *timing = NULL; | ||
107 | |||
108 | for (i = 0; i < mc->num_timings; i++) { | ||
109 | if (mc->timings[i].rate == rate) { | ||
110 | timing = &mc->timings[i]; | ||
111 | break; | ||
112 | } | ||
113 | } | ||
114 | |||
115 | if (!timing) { | ||
116 | dev_err(mc->dev, "no memory timing registered for rate %lu\n", | ||
117 | rate); | ||
118 | return; | ||
119 | } | ||
120 | |||
121 | for (i = 0; i < mc->soc->num_emem_regs; ++i) | ||
122 | mc_writel(mc, timing->emem_data[i], mc->soc->emem_regs[i]); | ||
123 | } | ||
124 | |||
125 | unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc) | ||
126 | { | ||
127 | u8 dram_count; | ||
128 | |||
129 | dram_count = mc_readl(mc, MC_EMEM_ADR_CFG); | ||
130 | dram_count &= MC_EMEM_ADR_CFG_EMEM_NUMDEV; | ||
131 | dram_count++; | ||
132 | |||
133 | return dram_count; | ||
134 | } | ||
135 | |||
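A hedged sketch of how an EMC-side caller consumes the two helpers above around a rate change; clock sequencing and error handling are omitted and the function is hypothetical.

/* Hypothetical EMC-side usage; a real driver wraps the actual clock switch. */
static void example_prepare_rate_change(struct tegra_mc *mc,
					unsigned long rate)
{
	unsigned int devices = tegra_mc_get_emem_device_count(mc);

	pr_debug("reprogramming MC for %lu Hz, %u DRAM device(s)\n",
		 rate, devices);

	/* Latch the register set registered for the target rate. */
	tegra_mc_write_emem_configuration(mc, rate);
}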
136 | static int load_one_timing(struct tegra_mc *mc, | ||
137 | struct tegra_mc_timing *timing, | ||
138 | struct device_node *node) | ||
139 | { | ||
140 | int err; | ||
141 | u32 tmp; | ||
142 | |||
143 | err = of_property_read_u32(node, "clock-frequency", &tmp); | ||
144 | if (err) { | ||
145 | dev_err(mc->dev, | ||
146 | "timing %s: failed to read rate\n", node->name); | ||
147 | return err; | ||
148 | } | ||
149 | |||
150 | timing->rate = tmp; | ||
151 | timing->emem_data = devm_kcalloc(mc->dev, mc->soc->num_emem_regs, | ||
152 | sizeof(u32), GFP_KERNEL); | ||
153 | if (!timing->emem_data) | ||
154 | return -ENOMEM; | ||
155 | |||
156 | err = of_property_read_u32_array(node, "nvidia,emem-configuration", | ||
157 | timing->emem_data, | ||
158 | mc->soc->num_emem_regs); | ||
159 | if (err) { | ||
160 | dev_err(mc->dev, | ||
161 | "timing %s: failed to read EMEM configuration\n", | ||
162 | node->name); | ||
163 | return err; | ||
164 | } | ||
165 | |||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | static int load_timings(struct tegra_mc *mc, struct device_node *node) | ||
170 | { | ||
171 | struct device_node *child; | ||
172 | struct tegra_mc_timing *timing; | ||
173 | int child_count = of_get_child_count(node); | ||
174 | int i = 0, err; | ||
175 | |||
176 | mc->timings = devm_kcalloc(mc->dev, child_count, sizeof(*timing), | ||
177 | GFP_KERNEL); | ||
178 | if (!mc->timings) | ||
179 | return -ENOMEM; | ||
180 | |||
181 | mc->num_timings = child_count; | ||
182 | |||
183 | for_each_child_of_node(node, child) { | ||
184 | timing = &mc->timings[i++]; | ||
185 | |||
186 | err = load_one_timing(mc, timing, child); | ||
187 | if (err) | ||
188 | return err; | ||
189 | } | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | static int tegra_mc_setup_timings(struct tegra_mc *mc) | ||
195 | { | ||
196 | struct device_node *node; | ||
197 | u32 ram_code, node_ram_code; | ||
198 | int err; | ||
199 | |||
200 | ram_code = tegra_read_ram_code(); | ||
201 | |||
202 | mc->num_timings = 0; | ||
203 | |||
204 | for_each_child_of_node(mc->dev->of_node, node) { | ||
205 | err = of_property_read_u32(node, "nvidia,ram-code", | ||
206 | &node_ram_code); | ||
207 | if (err || (node_ram_code != ram_code)) { | ||
208 | of_node_put(node); | ||
209 | continue; | ||
210 | } | ||
211 | |||
212 | err = load_timings(mc, node); | ||
213 | if (err) | ||
214 | return err; | ||
215 | of_node_put(node); | ||
216 | break; | ||
217 | } | ||
218 | |||
219 | if (mc->num_timings == 0) | ||
220 | dev_warn(mc->dev, | ||
221 | "no memory timings for RAM code %u registered\n", | ||
222 | ram_code); | ||
223 | |||
224 | return 0; | ||
225 | } | ||
226 | |||
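tegra_mc_setup_timings() picks the device-tree child whose nvidia,ram-code matches the strapped RAM code and then loads one timing entry per child of that node. The selection logic in isolation, over plain arrays with invented contents:

#include <stdio.h>

struct timing_table { unsigned int ram_code; int num_timings; };

int main(void)
{
	/* Invented example tables, one per RAM code strapping. */
	struct timing_table tables[] = {
		{ .ram_code = 0, .num_timings = 4 },
		{ .ram_code = 1, .num_timings = 6 },
	};
	unsigned int strapped = 1;	/* stands in for tegra_read_ram_code() */
	int i, found = -1;

	for (i = 0; i < 2; i++) {
		if (tables[i].ram_code == strapped) {
			found = i;
			break;
		}
	}

	if (found < 0)
		printf("no memory timings for RAM code %u registered\n",
		       strapped);
	else
		printf("using table %d with %d timings\n",
		       found, tables[found].num_timings);
	return 0;
}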
94 | static const char *const status_names[32] = { | 227 | static const char *const status_names[32] = { |
95 | [ 1] = "External interrupt", | 228 | [ 1] = "External interrupt", |
96 | [ 6] = "EMEM address decode error", | 229 | [ 6] = "EMEM address decode error", |
@@ -248,6 +381,12 @@ static int tegra_mc_probe(struct platform_device *pdev) | |||
248 | return err; | 381 | return err; |
249 | } | 382 | } |
250 | 383 | ||
384 | err = tegra_mc_setup_timings(mc); | ||
385 | if (err < 0) { | ||
386 | dev_err(&pdev->dev, "failed to setup timings: %d\n", err); | ||
387 | return err; | ||
388 | } | ||
389 | |||
251 | if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU)) { | 390 | if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU)) { |
252 | mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc); | 391 | mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc); |
253 | if (IS_ERR(mc->smmu)) { | 392 | if (IS_ERR(mc->smmu)) { |
@@ -273,8 +412,8 @@ static int tegra_mc_probe(struct platform_device *pdev) | |||
273 | 412 | ||
274 | value = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | | 413 | value = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | |
275 | MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | | 414 | MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | |
276 | MC_INT_ARBITRATION_EMEM | MC_INT_SECURITY_VIOLATION | | 415 | MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM; |
277 | MC_INT_DECERR_EMEM; | 416 | |
278 | mc_writel(mc, value, MC_INTMASK); | 417 | mc_writel(mc, value, MC_INTMASK); |
279 | 418 | ||
280 | return 0; | 419 | return 0; |
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h index d5d21147fc77..b7361b0a6696 100644 --- a/drivers/memory/tegra/mc.h +++ b/drivers/memory/tegra/mc.h | |||
@@ -37,4 +37,8 @@ extern const struct tegra_mc_soc tegra114_mc_soc; | |||
37 | extern const struct tegra_mc_soc tegra124_mc_soc; | 37 | extern const struct tegra_mc_soc tegra124_mc_soc; |
38 | #endif | 38 | #endif |
39 | 39 | ||
40 | #ifdef CONFIG_ARCH_TEGRA_132_SOC | ||
41 | extern const struct tegra_mc_soc tegra132_mc_soc; | ||
42 | #endif | ||
43 | |||
40 | #endif /* MEMORY_TEGRA_MC_H */ | 44 | #endif /* MEMORY_TEGRA_MC_H */ |
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c index 511e9a25c151..9f579589e800 100644 --- a/drivers/memory/tegra/tegra114.c +++ b/drivers/memory/tegra/tegra114.c | |||
@@ -896,22 +896,22 @@ static const struct tegra_mc_client tegra114_mc_clients[] = { | |||
896 | }; | 896 | }; |
897 | 897 | ||
898 | static const struct tegra_smmu_swgroup tegra114_swgroups[] = { | 898 | static const struct tegra_smmu_swgroup tegra114_swgroups[] = { |
899 | { .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 }, | 899 | { .name = "dc", .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 }, |
900 | { .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 }, | 900 | { .name = "dcb", .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 }, |
901 | { .swgroup = TEGRA_SWGROUP_EPP, .reg = 0x248 }, | 901 | { .name = "epp", .swgroup = TEGRA_SWGROUP_EPP, .reg = 0x248 }, |
902 | { .swgroup = TEGRA_SWGROUP_G2, .reg = 0x24c }, | 902 | { .name = "g2", .swgroup = TEGRA_SWGROUP_G2, .reg = 0x24c }, |
903 | { .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c }, | 903 | { .name = "avpc", .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c }, |
904 | { .swgroup = TEGRA_SWGROUP_NV, .reg = 0x268 }, | 904 | { .name = "nv", .swgroup = TEGRA_SWGROUP_NV, .reg = 0x268 }, |
905 | { .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 }, | 905 | { .name = "hda", .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 }, |
906 | { .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 }, | 906 | { .name = "hc", .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 }, |
907 | { .swgroup = TEGRA_SWGROUP_MSENC, .reg = 0x264 }, | 907 | { .name = "msenc", .swgroup = TEGRA_SWGROUP_MSENC, .reg = 0x264 }, |
908 | { .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 }, | 908 | { .name = "ppcs", .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 }, |
909 | { .swgroup = TEGRA_SWGROUP_VDE, .reg = 0x27c }, | 909 | { .name = "vde", .swgroup = TEGRA_SWGROUP_VDE, .reg = 0x27c }, |
910 | { .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 }, | 910 | { .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 }, |
911 | { .swgroup = TEGRA_SWGROUP_ISP, .reg = 0x258 }, | 911 | { .name = "isp", .swgroup = TEGRA_SWGROUP_ISP, .reg = 0x258 }, |
912 | { .swgroup = TEGRA_SWGROUP_XUSB_HOST, .reg = 0x288 }, | 912 | { .name = "xusb_host", .swgroup = TEGRA_SWGROUP_XUSB_HOST, .reg = 0x288 }, |
913 | { .swgroup = TEGRA_SWGROUP_XUSB_DEV, .reg = 0x28c }, | 913 | { .name = "xusb_dev", .swgroup = TEGRA_SWGROUP_XUSB_DEV, .reg = 0x28c }, |
914 | { .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 }, | 914 | { .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 }, |
915 | }; | 915 | }; |
916 | 916 | ||
917 | static void tegra114_flush_dcache(struct page *page, unsigned long offset, | 917 | static void tegra114_flush_dcache(struct page *page, unsigned long offset, |
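Each swgroup entry above now carries a human-readable name next to its ASID register offset. The helper below is hypothetical and only illustrates what the new field enables (for instance, naming per-swgroup debugfs entries or error messages); the tegra_smmu_swgroup layout itself is taken from these tables, and the header path is an assumption.

  #include <linux/string.h>
  #include <soc/tegra/mc.h>       /* assumed home of struct tegra_smmu_swgroup */

  /* Hypothetical helper: resolve a swgroup descriptor by its new "name" field. */
  static const struct tegra_smmu_swgroup *
  tegra_smmu_find_swgroup(const struct tegra_smmu_swgroup *groups,
                          unsigned int count, const char *name)
  {
          unsigned int i;

          for (i = 0; i < count; i++)
                  if (!strcmp(groups[i].name, name))
                          return &groups[i];

          return NULL;
  }

  /* e.g. tegra_smmu_find_swgroup(tegra114_swgroups,
   *                              ARRAY_SIZE(tegra114_swgroups), "hda"); */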
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c new file mode 100644 index 000000000000..8620355776fe --- /dev/null +++ b/drivers/memory/tegra/tegra124-emc.c | |||
@@ -0,0 +1,1140 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. | ||
3 | * | ||
4 | * Author: | ||
5 | * Mikko Perttunen <mperttunen@nvidia.com> | ||
6 | * | ||
7 | * This software is licensed under the terms of the GNU General Public | ||
8 | * License version 2, as published by the Free Software Foundation, and | ||
9 | * may be copied, distributed, and modified under those terms. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include <linux/clk-provider.h> | ||
19 | #include <linux/clk.h> | ||
20 | #include <linux/clkdev.h> | ||
21 | #include <linux/debugfs.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include <linux/of_address.h> | ||
24 | #include <linux/of_platform.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/sort.h> | ||
27 | #include <linux/string.h> | ||
28 | |||
29 | #include <soc/tegra/emc.h> | ||
30 | #include <soc/tegra/fuse.h> | ||
31 | #include <soc/tegra/mc.h> | ||
32 | |||
33 | #define EMC_FBIO_CFG5 0x104 | ||
34 | #define EMC_FBIO_CFG5_DRAM_TYPE_MASK 0x3 | ||
35 | #define EMC_FBIO_CFG5_DRAM_TYPE_SHIFT 0 | ||
36 | |||
37 | #define EMC_INTSTATUS 0x0 | ||
38 | #define EMC_INTSTATUS_CLKCHANGE_COMPLETE BIT(4) | ||
39 | |||
40 | #define EMC_CFG 0xc | ||
41 | #define EMC_CFG_DRAM_CLKSTOP_PD BIT(31) | ||
42 | #define EMC_CFG_DRAM_CLKSTOP_SR BIT(30) | ||
43 | #define EMC_CFG_DRAM_ACPD BIT(29) | ||
44 | #define EMC_CFG_DYN_SREF BIT(28) | ||
45 | #define EMC_CFG_PWR_MASK ((0xF << 28) | BIT(18)) | ||
46 | #define EMC_CFG_DSR_VTTGEN_DRV_EN BIT(18) | ||
47 | |||
48 | #define EMC_REFCTRL 0x20 | ||
49 | #define EMC_REFCTRL_DEV_SEL_SHIFT 0 | ||
50 | #define EMC_REFCTRL_ENABLE BIT(31) | ||
51 | |||
52 | #define EMC_TIMING_CONTROL 0x28 | ||
53 | #define EMC_RC 0x2c | ||
54 | #define EMC_RFC 0x30 | ||
55 | #define EMC_RAS 0x34 | ||
56 | #define EMC_RP 0x38 | ||
57 | #define EMC_R2W 0x3c | ||
58 | #define EMC_W2R 0x40 | ||
59 | #define EMC_R2P 0x44 | ||
60 | #define EMC_W2P 0x48 | ||
61 | #define EMC_RD_RCD 0x4c | ||
62 | #define EMC_WR_RCD 0x50 | ||
63 | #define EMC_RRD 0x54 | ||
64 | #define EMC_REXT 0x58 | ||
65 | #define EMC_WDV 0x5c | ||
66 | #define EMC_QUSE 0x60 | ||
67 | #define EMC_QRST 0x64 | ||
68 | #define EMC_QSAFE 0x68 | ||
69 | #define EMC_RDV 0x6c | ||
70 | #define EMC_REFRESH 0x70 | ||
71 | #define EMC_BURST_REFRESH_NUM 0x74 | ||
72 | #define EMC_PDEX2WR 0x78 | ||
73 | #define EMC_PDEX2RD 0x7c | ||
74 | #define EMC_PCHG2PDEN 0x80 | ||
75 | #define EMC_ACT2PDEN 0x84 | ||
76 | #define EMC_AR2PDEN 0x88 | ||
77 | #define EMC_RW2PDEN 0x8c | ||
78 | #define EMC_TXSR 0x90 | ||
79 | #define EMC_TCKE 0x94 | ||
80 | #define EMC_TFAW 0x98 | ||
81 | #define EMC_TRPAB 0x9c | ||
82 | #define EMC_TCLKSTABLE 0xa0 | ||
83 | #define EMC_TCLKSTOP 0xa4 | ||
84 | #define EMC_TREFBW 0xa8 | ||
85 | #define EMC_ODT_WRITE 0xb0 | ||
86 | #define EMC_ODT_READ 0xb4 | ||
87 | #define EMC_WEXT 0xb8 | ||
88 | #define EMC_CTT 0xbc | ||
89 | #define EMC_RFC_SLR 0xc0 | ||
90 | #define EMC_MRS_WAIT_CNT2 0xc4 | ||
91 | |||
92 | #define EMC_MRS_WAIT_CNT 0xc8 | ||
93 | #define EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT 0 | ||
94 | #define EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK \ | ||
95 | (0x3FF << EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT) | ||
96 | #define EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT 16 | ||
97 | #define EMC_MRS_WAIT_CNT_LONG_WAIT_MASK \ | ||
98 | (0x3FF << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) | ||
99 | |||
100 | #define EMC_MRS 0xcc | ||
101 | #define EMC_MODE_SET_DLL_RESET BIT(8) | ||
102 | #define EMC_MODE_SET_LONG_CNT BIT(26) | ||
103 | #define EMC_EMRS 0xd0 | ||
104 | #define EMC_REF 0xd4 | ||
105 | #define EMC_PRE 0xd8 | ||
106 | |||
107 | #define EMC_SELF_REF 0xe0 | ||
108 | #define EMC_SELF_REF_CMD_ENABLED BIT(0) | ||
109 | #define EMC_SELF_REF_DEV_SEL_SHIFT 30 | ||
110 | |||
111 | #define EMC_MRW 0xe8 | ||
112 | |||
113 | #define EMC_MRR 0xec | ||
114 | #define EMC_MRR_MA_SHIFT 16 | ||
115 | #define LPDDR2_MR4_TEMP_SHIFT 0 | ||
116 | |||
117 | #define EMC_XM2DQSPADCTRL3 0xf8 | ||
118 | #define EMC_FBIO_SPARE 0x100 | ||
119 | |||
120 | #define EMC_FBIO_CFG6 0x114 | ||
121 | #define EMC_EMRS2 0x12c | ||
122 | #define EMC_MRW2 0x134 | ||
123 | #define EMC_MRW4 0x13c | ||
124 | #define EMC_EINPUT 0x14c | ||
125 | #define EMC_EINPUT_DURATION 0x150 | ||
126 | #define EMC_PUTERM_EXTRA 0x154 | ||
127 | #define EMC_TCKESR 0x158 | ||
128 | #define EMC_TPD 0x15c | ||
129 | |||
130 | #define EMC_AUTO_CAL_CONFIG 0x2a4 | ||
131 | #define EMC_AUTO_CAL_CONFIG_AUTO_CAL_START BIT(31) | ||
132 | #define EMC_AUTO_CAL_INTERVAL 0x2a8 | ||
133 | #define EMC_AUTO_CAL_STATUS 0x2ac | ||
134 | #define EMC_AUTO_CAL_STATUS_ACTIVE BIT(31) | ||
135 | #define EMC_STATUS 0x2b4 | ||
136 | #define EMC_STATUS_TIMING_UPDATE_STALLED BIT(23) | ||
137 | |||
138 | #define EMC_CFG_2 0x2b8 | ||
139 | #define EMC_CFG_2_MODE_SHIFT 0 | ||
140 | #define EMC_CFG_2_DIS_STP_OB_CLK_DURING_NON_WR BIT(6) | ||
141 | |||
142 | #define EMC_CFG_DIG_DLL 0x2bc | ||
143 | #define EMC_CFG_DIG_DLL_PERIOD 0x2c0 | ||
144 | #define EMC_RDV_MASK 0x2cc | ||
145 | #define EMC_WDV_MASK 0x2d0 | ||
146 | #define EMC_CTT_DURATION 0x2d8 | ||
147 | #define EMC_CTT_TERM_CTRL 0x2dc | ||
148 | #define EMC_ZCAL_INTERVAL 0x2e0 | ||
149 | #define EMC_ZCAL_WAIT_CNT 0x2e4 | ||
150 | |||
151 | #define EMC_ZQ_CAL 0x2ec | ||
152 | #define EMC_ZQ_CAL_CMD BIT(0) | ||
153 | #define EMC_ZQ_CAL_LONG BIT(4) | ||
154 | #define EMC_ZQ_CAL_LONG_CMD_DEV0 \ | ||
155 | (DRAM_DEV_SEL_0 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD) | ||
156 | #define EMC_ZQ_CAL_LONG_CMD_DEV1 \ | ||
157 | (DRAM_DEV_SEL_1 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD) | ||
158 | |||
159 | #define EMC_XM2CMDPADCTRL 0x2f0 | ||
160 | #define EMC_XM2DQSPADCTRL 0x2f8 | ||
161 | #define EMC_XM2DQSPADCTRL2 0x2fc | ||
162 | #define EMC_XM2DQSPADCTRL2_RX_FT_REC_ENABLE BIT(0) | ||
163 | #define EMC_XM2DQSPADCTRL2_VREF_ENABLE BIT(5) | ||
164 | #define EMC_XM2DQPADCTRL 0x300 | ||
165 | #define EMC_XM2DQPADCTRL2 0x304 | ||
166 | #define EMC_XM2CLKPADCTRL 0x308 | ||
167 | #define EMC_XM2COMPPADCTRL 0x30c | ||
168 | #define EMC_XM2VTTGENPADCTRL 0x310 | ||
169 | #define EMC_XM2VTTGENPADCTRL2 0x314 | ||
170 | #define EMC_XM2VTTGENPADCTRL3 0x318 | ||
171 | #define EMC_XM2DQSPADCTRL4 0x320 | ||
172 | #define EMC_DLL_XFORM_DQS0 0x328 | ||
173 | #define EMC_DLL_XFORM_DQS1 0x32c | ||
174 | #define EMC_DLL_XFORM_DQS2 0x330 | ||
175 | #define EMC_DLL_XFORM_DQS3 0x334 | ||
176 | #define EMC_DLL_XFORM_DQS4 0x338 | ||
177 | #define EMC_DLL_XFORM_DQS5 0x33c | ||
178 | #define EMC_DLL_XFORM_DQS6 0x340 | ||
179 | #define EMC_DLL_XFORM_DQS7 0x344 | ||
180 | #define EMC_DLL_XFORM_QUSE0 0x348 | ||
181 | #define EMC_DLL_XFORM_QUSE1 0x34c | ||
182 | #define EMC_DLL_XFORM_QUSE2 0x350 | ||
183 | #define EMC_DLL_XFORM_QUSE3 0x354 | ||
184 | #define EMC_DLL_XFORM_QUSE4 0x358 | ||
185 | #define EMC_DLL_XFORM_QUSE5 0x35c | ||
186 | #define EMC_DLL_XFORM_QUSE6 0x360 | ||
187 | #define EMC_DLL_XFORM_QUSE7 0x364 | ||
188 | #define EMC_DLL_XFORM_DQ0 0x368 | ||
189 | #define EMC_DLL_XFORM_DQ1 0x36c | ||
190 | #define EMC_DLL_XFORM_DQ2 0x370 | ||
191 | #define EMC_DLL_XFORM_DQ3 0x374 | ||
192 | #define EMC_DLI_TRIM_TXDQS0 0x3a8 | ||
193 | #define EMC_DLI_TRIM_TXDQS1 0x3ac | ||
194 | #define EMC_DLI_TRIM_TXDQS2 0x3b0 | ||
195 | #define EMC_DLI_TRIM_TXDQS3 0x3b4 | ||
196 | #define EMC_DLI_TRIM_TXDQS4 0x3b8 | ||
197 | #define EMC_DLI_TRIM_TXDQS5 0x3bc | ||
198 | #define EMC_DLI_TRIM_TXDQS6 0x3c0 | ||
199 | #define EMC_DLI_TRIM_TXDQS7 0x3c4 | ||
200 | #define EMC_STALL_THEN_EXE_AFTER_CLKCHANGE 0x3cc | ||
201 | #define EMC_SEL_DPD_CTRL 0x3d8 | ||
202 | #define EMC_SEL_DPD_CTRL_DATA_SEL_DPD BIT(8) | ||
203 | #define EMC_SEL_DPD_CTRL_ODT_SEL_DPD BIT(5) | ||
204 | #define EMC_SEL_DPD_CTRL_RESET_SEL_DPD BIT(4) | ||
205 | #define EMC_SEL_DPD_CTRL_CA_SEL_DPD BIT(3) | ||
206 | #define EMC_SEL_DPD_CTRL_CLK_SEL_DPD BIT(2) | ||
207 | #define EMC_SEL_DPD_CTRL_DDR3_MASK \ | ||
208 | ((0xf << 2) | BIT(8)) | ||
209 | #define EMC_SEL_DPD_CTRL_MASK \ | ||
210 | ((0x3 << 2) | BIT(5) | BIT(8)) | ||
211 | #define EMC_PRE_REFRESH_REQ_CNT 0x3dc | ||
212 | #define EMC_DYN_SELF_REF_CONTROL 0x3e0 | ||
213 | #define EMC_TXSRDLL 0x3e4 | ||
214 | #define EMC_CCFIFO_ADDR 0x3e8 | ||
215 | #define EMC_CCFIFO_DATA 0x3ec | ||
216 | #define EMC_CCFIFO_STATUS 0x3f0 | ||
217 | #define EMC_CDB_CNTL_1 0x3f4 | ||
218 | #define EMC_CDB_CNTL_2 0x3f8 | ||
219 | #define EMC_XM2CLKPADCTRL2 0x3fc | ||
220 | #define EMC_AUTO_CAL_CONFIG2 0x458 | ||
221 | #define EMC_AUTO_CAL_CONFIG3 0x45c | ||
222 | #define EMC_IBDLY 0x468 | ||
223 | #define EMC_DLL_XFORM_ADDR0 0x46c | ||
224 | #define EMC_DLL_XFORM_ADDR1 0x470 | ||
225 | #define EMC_DLL_XFORM_ADDR2 0x474 | ||
226 | #define EMC_DSR_VTTGEN_DRV 0x47c | ||
227 | #define EMC_TXDSRVTTGEN 0x480 | ||
228 | #define EMC_XM2CMDPADCTRL4 0x484 | ||
229 | #define EMC_XM2CMDPADCTRL5 0x488 | ||
230 | #define EMC_DLL_XFORM_DQS8 0x4a0 | ||
231 | #define EMC_DLL_XFORM_DQS9 0x4a4 | ||
232 | #define EMC_DLL_XFORM_DQS10 0x4a8 | ||
233 | #define EMC_DLL_XFORM_DQS11 0x4ac | ||
234 | #define EMC_DLL_XFORM_DQS12 0x4b0 | ||
235 | #define EMC_DLL_XFORM_DQS13 0x4b4 | ||
236 | #define EMC_DLL_XFORM_DQS14 0x4b8 | ||
237 | #define EMC_DLL_XFORM_DQS15 0x4bc | ||
238 | #define EMC_DLL_XFORM_QUSE8 0x4c0 | ||
239 | #define EMC_DLL_XFORM_QUSE9 0x4c4 | ||
240 | #define EMC_DLL_XFORM_QUSE10 0x4c8 | ||
241 | #define EMC_DLL_XFORM_QUSE11 0x4cc | ||
242 | #define EMC_DLL_XFORM_QUSE12 0x4d0 | ||
243 | #define EMC_DLL_XFORM_QUSE13 0x4d4 | ||
244 | #define EMC_DLL_XFORM_QUSE14 0x4d8 | ||
245 | #define EMC_DLL_XFORM_QUSE15 0x4dc | ||
246 | #define EMC_DLL_XFORM_DQ4 0x4e0 | ||
247 | #define EMC_DLL_XFORM_DQ5 0x4e4 | ||
248 | #define EMC_DLL_XFORM_DQ6 0x4e8 | ||
249 | #define EMC_DLL_XFORM_DQ7 0x4ec | ||
250 | #define EMC_DLI_TRIM_TXDQS8 0x520 | ||
251 | #define EMC_DLI_TRIM_TXDQS9 0x524 | ||
252 | #define EMC_DLI_TRIM_TXDQS10 0x528 | ||
253 | #define EMC_DLI_TRIM_TXDQS11 0x52c | ||
254 | #define EMC_DLI_TRIM_TXDQS12 0x530 | ||
255 | #define EMC_DLI_TRIM_TXDQS13 0x534 | ||
256 | #define EMC_DLI_TRIM_TXDQS14 0x538 | ||
257 | #define EMC_DLI_TRIM_TXDQS15 0x53c | ||
258 | #define EMC_CDB_CNTL_3 0x540 | ||
259 | #define EMC_XM2DQSPADCTRL5 0x544 | ||
260 | #define EMC_XM2DQSPADCTRL6 0x548 | ||
261 | #define EMC_XM2DQPADCTRL3 0x54c | ||
262 | #define EMC_DLL_XFORM_ADDR3 0x550 | ||
263 | #define EMC_DLL_XFORM_ADDR4 0x554 | ||
264 | #define EMC_DLL_XFORM_ADDR5 0x558 | ||
265 | #define EMC_CFG_PIPE 0x560 | ||
266 | #define EMC_QPOP 0x564 | ||
267 | #define EMC_QUSE_WIDTH 0x568 | ||
268 | #define EMC_PUTERM_WIDTH 0x56c | ||
269 | #define EMC_BGBIAS_CTL0 0x570 | ||
270 | #define EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_RX BIT(3) | ||
271 | #define EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_VTTGEN BIT(2) | ||
272 | #define EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD BIT(1) | ||
273 | #define EMC_PUTERM_ADJ 0x574 | ||
274 | |||
275 | #define DRAM_DEV_SEL_ALL 0 | ||
276 | #define DRAM_DEV_SEL_0 (2 << 30) | ||
277 | #define DRAM_DEV_SEL_1 (1 << 30) | ||
278 | |||
279 | #define EMC_CFG_POWER_FEATURES_MASK \ | ||
280 | (EMC_CFG_DYN_SREF | EMC_CFG_DRAM_ACPD | EMC_CFG_DRAM_CLKSTOP_SR | \ | ||
281 | EMC_CFG_DRAM_CLKSTOP_PD | EMC_CFG_DSR_VTTGEN_DRV_EN) | ||
282 | #define EMC_REFCTRL_DEV_SEL(n) (((n > 1) ? 0 : 2) << EMC_REFCTRL_DEV_SEL_SHIFT) | ||
283 | #define EMC_DRAM_DEV_SEL(n) ((n > 1) ? DRAM_DEV_SEL_ALL : DRAM_DEV_SEL_0) | ||
284 | |||
285 | /* Maximum amount of time, in microseconds, to wait for changes to become effective */ | ||
286 | #define EMC_STATUS_UPDATE_TIMEOUT 1000 | ||
287 | |||
288 | enum emc_dram_type { | ||
289 | DRAM_TYPE_DDR3 = 0, | ||
290 | DRAM_TYPE_DDR1 = 1, | ||
291 | DRAM_TYPE_LPDDR3 = 2, | ||
292 | DRAM_TYPE_DDR2 = 3 | ||
293 | }; | ||
294 | |||
295 | enum emc_dll_change { | ||
296 | DLL_CHANGE_NONE, | ||
297 | DLL_CHANGE_ON, | ||
298 | DLL_CHANGE_OFF | ||
299 | }; | ||
300 | |||
301 | static const unsigned long emc_burst_regs[] = { | ||
302 | EMC_RC, | ||
303 | EMC_RFC, | ||
304 | EMC_RFC_SLR, | ||
305 | EMC_RAS, | ||
306 | EMC_RP, | ||
307 | EMC_R2W, | ||
308 | EMC_W2R, | ||
309 | EMC_R2P, | ||
310 | EMC_W2P, | ||
311 | EMC_RD_RCD, | ||
312 | EMC_WR_RCD, | ||
313 | EMC_RRD, | ||
314 | EMC_REXT, | ||
315 | EMC_WEXT, | ||
316 | EMC_WDV, | ||
317 | EMC_WDV_MASK, | ||
318 | EMC_QUSE, | ||
319 | EMC_QUSE_WIDTH, | ||
320 | EMC_IBDLY, | ||
321 | EMC_EINPUT, | ||
322 | EMC_EINPUT_DURATION, | ||
323 | EMC_PUTERM_EXTRA, | ||
324 | EMC_PUTERM_WIDTH, | ||
325 | EMC_PUTERM_ADJ, | ||
326 | EMC_CDB_CNTL_1, | ||
327 | EMC_CDB_CNTL_2, | ||
328 | EMC_CDB_CNTL_3, | ||
329 | EMC_QRST, | ||
330 | EMC_QSAFE, | ||
331 | EMC_RDV, | ||
332 | EMC_RDV_MASK, | ||
333 | EMC_REFRESH, | ||
334 | EMC_BURST_REFRESH_NUM, | ||
335 | EMC_PRE_REFRESH_REQ_CNT, | ||
336 | EMC_PDEX2WR, | ||
337 | EMC_PDEX2RD, | ||
338 | EMC_PCHG2PDEN, | ||
339 | EMC_ACT2PDEN, | ||
340 | EMC_AR2PDEN, | ||
341 | EMC_RW2PDEN, | ||
342 | EMC_TXSR, | ||
343 | EMC_TXSRDLL, | ||
344 | EMC_TCKE, | ||
345 | EMC_TCKESR, | ||
346 | EMC_TPD, | ||
347 | EMC_TFAW, | ||
348 | EMC_TRPAB, | ||
349 | EMC_TCLKSTABLE, | ||
350 | EMC_TCLKSTOP, | ||
351 | EMC_TREFBW, | ||
352 | EMC_FBIO_CFG6, | ||
353 | EMC_ODT_WRITE, | ||
354 | EMC_ODT_READ, | ||
355 | EMC_FBIO_CFG5, | ||
356 | EMC_CFG_DIG_DLL, | ||
357 | EMC_CFG_DIG_DLL_PERIOD, | ||
358 | EMC_DLL_XFORM_DQS0, | ||
359 | EMC_DLL_XFORM_DQS1, | ||
360 | EMC_DLL_XFORM_DQS2, | ||
361 | EMC_DLL_XFORM_DQS3, | ||
362 | EMC_DLL_XFORM_DQS4, | ||
363 | EMC_DLL_XFORM_DQS5, | ||
364 | EMC_DLL_XFORM_DQS6, | ||
365 | EMC_DLL_XFORM_DQS7, | ||
366 | EMC_DLL_XFORM_DQS8, | ||
367 | EMC_DLL_XFORM_DQS9, | ||
368 | EMC_DLL_XFORM_DQS10, | ||
369 | EMC_DLL_XFORM_DQS11, | ||
370 | EMC_DLL_XFORM_DQS12, | ||
371 | EMC_DLL_XFORM_DQS13, | ||
372 | EMC_DLL_XFORM_DQS14, | ||
373 | EMC_DLL_XFORM_DQS15, | ||
374 | EMC_DLL_XFORM_QUSE0, | ||
375 | EMC_DLL_XFORM_QUSE1, | ||
376 | EMC_DLL_XFORM_QUSE2, | ||
377 | EMC_DLL_XFORM_QUSE3, | ||
378 | EMC_DLL_XFORM_QUSE4, | ||
379 | EMC_DLL_XFORM_QUSE5, | ||
380 | EMC_DLL_XFORM_QUSE6, | ||
381 | EMC_DLL_XFORM_QUSE7, | ||
382 | EMC_DLL_XFORM_ADDR0, | ||
383 | EMC_DLL_XFORM_ADDR1, | ||
384 | EMC_DLL_XFORM_ADDR2, | ||
385 | EMC_DLL_XFORM_ADDR3, | ||
386 | EMC_DLL_XFORM_ADDR4, | ||
387 | EMC_DLL_XFORM_ADDR5, | ||
388 | EMC_DLL_XFORM_QUSE8, | ||
389 | EMC_DLL_XFORM_QUSE9, | ||
390 | EMC_DLL_XFORM_QUSE10, | ||
391 | EMC_DLL_XFORM_QUSE11, | ||
392 | EMC_DLL_XFORM_QUSE12, | ||
393 | EMC_DLL_XFORM_QUSE13, | ||
394 | EMC_DLL_XFORM_QUSE14, | ||
395 | EMC_DLL_XFORM_QUSE15, | ||
396 | EMC_DLI_TRIM_TXDQS0, | ||
397 | EMC_DLI_TRIM_TXDQS1, | ||
398 | EMC_DLI_TRIM_TXDQS2, | ||
399 | EMC_DLI_TRIM_TXDQS3, | ||
400 | EMC_DLI_TRIM_TXDQS4, | ||
401 | EMC_DLI_TRIM_TXDQS5, | ||
402 | EMC_DLI_TRIM_TXDQS6, | ||
403 | EMC_DLI_TRIM_TXDQS7, | ||
404 | EMC_DLI_TRIM_TXDQS8, | ||
405 | EMC_DLI_TRIM_TXDQS9, | ||
406 | EMC_DLI_TRIM_TXDQS10, | ||
407 | EMC_DLI_TRIM_TXDQS11, | ||
408 | EMC_DLI_TRIM_TXDQS12, | ||
409 | EMC_DLI_TRIM_TXDQS13, | ||
410 | EMC_DLI_TRIM_TXDQS14, | ||
411 | EMC_DLI_TRIM_TXDQS15, | ||
412 | EMC_DLL_XFORM_DQ0, | ||
413 | EMC_DLL_XFORM_DQ1, | ||
414 | EMC_DLL_XFORM_DQ2, | ||
415 | EMC_DLL_XFORM_DQ3, | ||
416 | EMC_DLL_XFORM_DQ4, | ||
417 | EMC_DLL_XFORM_DQ5, | ||
418 | EMC_DLL_XFORM_DQ6, | ||
419 | EMC_DLL_XFORM_DQ7, | ||
420 | EMC_XM2CMDPADCTRL, | ||
421 | EMC_XM2CMDPADCTRL4, | ||
422 | EMC_XM2CMDPADCTRL5, | ||
423 | EMC_XM2DQPADCTRL2, | ||
424 | EMC_XM2DQPADCTRL3, | ||
425 | EMC_XM2CLKPADCTRL, | ||
426 | EMC_XM2CLKPADCTRL2, | ||
427 | EMC_XM2COMPPADCTRL, | ||
428 | EMC_XM2VTTGENPADCTRL, | ||
429 | EMC_XM2VTTGENPADCTRL2, | ||
430 | EMC_XM2VTTGENPADCTRL3, | ||
431 | EMC_XM2DQSPADCTRL3, | ||
432 | EMC_XM2DQSPADCTRL4, | ||
433 | EMC_XM2DQSPADCTRL5, | ||
434 | EMC_XM2DQSPADCTRL6, | ||
435 | EMC_DSR_VTTGEN_DRV, | ||
436 | EMC_TXDSRVTTGEN, | ||
437 | EMC_FBIO_SPARE, | ||
438 | EMC_ZCAL_WAIT_CNT, | ||
439 | EMC_MRS_WAIT_CNT2, | ||
440 | EMC_CTT, | ||
441 | EMC_CTT_DURATION, | ||
442 | EMC_CFG_PIPE, | ||
443 | EMC_DYN_SELF_REF_CONTROL, | ||
444 | EMC_QPOP | ||
445 | }; | ||
446 | |||
447 | struct emc_timing { | ||
448 | unsigned long rate; | ||
449 | |||
450 | u32 emc_burst_data[ARRAY_SIZE(emc_burst_regs)]; | ||
451 | |||
452 | u32 emc_auto_cal_config; | ||
453 | u32 emc_auto_cal_config2; | ||
454 | u32 emc_auto_cal_config3; | ||
455 | u32 emc_auto_cal_interval; | ||
456 | u32 emc_bgbias_ctl0; | ||
457 | u32 emc_cfg; | ||
458 | u32 emc_cfg_2; | ||
459 | u32 emc_ctt_term_ctrl; | ||
460 | u32 emc_mode_1; | ||
461 | u32 emc_mode_2; | ||
462 | u32 emc_mode_4; | ||
463 | u32 emc_mode_reset; | ||
464 | u32 emc_mrs_wait_cnt; | ||
465 | u32 emc_sel_dpd_ctrl; | ||
466 | u32 emc_xm2dqspadctrl2; | ||
467 | u32 emc_zcal_cnt_long; | ||
468 | u32 emc_zcal_interval; | ||
469 | }; | ||
470 | |||
471 | struct tegra_emc { | ||
472 | struct device *dev; | ||
473 | |||
474 | struct tegra_mc *mc; | ||
475 | |||
476 | void __iomem *regs; | ||
477 | |||
478 | enum emc_dram_type dram_type; | ||
479 | unsigned int dram_num; | ||
480 | |||
481 | struct emc_timing last_timing; | ||
482 | struct emc_timing *timings; | ||
483 | unsigned int num_timings; | ||
484 | }; | ||
485 | |||
486 | /* Timing change sequence functions */ | ||
487 | |||
488 | static void emc_ccfifo_writel(struct tegra_emc *emc, u32 value, | ||
489 | unsigned long offset) | ||
490 | { | ||
491 | writel(value, emc->regs + EMC_CCFIFO_DATA); | ||
492 | writel(offset, emc->regs + EMC_CCFIFO_ADDR); | ||
493 | } | ||
494 | |||
495 | static void emc_seq_update_timing(struct tegra_emc *emc) | ||
496 | { | ||
497 | unsigned int i; | ||
498 | u32 value; | ||
499 | |||
500 | writel(1, emc->regs + EMC_TIMING_CONTROL); | ||
501 | |||
502 | for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; ++i) { | ||
503 | value = readl(emc->regs + EMC_STATUS); | ||
504 | if ((value & EMC_STATUS_TIMING_UPDATE_STALLED) == 0) | ||
505 | return; | ||
506 | udelay(1); | ||
507 | } | ||
508 | |||
509 | dev_err(emc->dev, "timing update timed out\n"); | ||
510 | } | ||
511 | |||
512 | static void emc_seq_disable_auto_cal(struct tegra_emc *emc) | ||
513 | { | ||
514 | unsigned int i; | ||
515 | u32 value; | ||
516 | |||
517 | writel(0, emc->regs + EMC_AUTO_CAL_INTERVAL); | ||
518 | |||
519 | for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; ++i) { | ||
520 | value = readl(emc->regs + EMC_AUTO_CAL_STATUS); | ||
521 | if ((value & EMC_AUTO_CAL_STATUS_ACTIVE) == 0) | ||
522 | return; | ||
523 | udelay(1); | ||
524 | } | ||
525 | |||
526 | dev_err(emc->dev, "auto cal disable timed out\n"); | ||
527 | } | ||
528 | |||
529 | static void emc_seq_wait_clkchange(struct tegra_emc *emc) | ||
530 | { | ||
531 | unsigned int i; | ||
532 | u32 value; | ||
533 | |||
534 | for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; ++i) { | ||
535 | value = readl(emc->regs + EMC_INTSTATUS); | ||
536 | if (value & EMC_INTSTATUS_CLKCHANGE_COMPLETE) | ||
537 | return; | ||
538 | udelay(1); | ||
539 | } | ||
540 | |||
541 | dev_err(emc->dev, "clock change timed out\n"); | ||
542 | } | ||
543 | |||
544 | static struct emc_timing *tegra_emc_find_timing(struct tegra_emc *emc, | ||
545 | unsigned long rate) | ||
546 | { | ||
547 | struct emc_timing *timing = NULL; | ||
548 | unsigned int i; | ||
549 | |||
550 | for (i = 0; i < emc->num_timings; i++) { | ||
551 | if (emc->timings[i].rate == rate) { | ||
552 | timing = &emc->timings[i]; | ||
553 | break; | ||
554 | } | ||
555 | } | ||
556 | |||
557 | if (!timing) { | ||
558 | dev_err(emc->dev, "no timing for rate %lu\n", rate); | ||
559 | return NULL; | ||
560 | } | ||
561 | |||
562 | return timing; | ||
563 | } | ||
564 | |||
565 | int tegra_emc_prepare_timing_change(struct tegra_emc *emc, | ||
566 | unsigned long rate) | ||
567 | { | ||
568 | struct emc_timing *timing = tegra_emc_find_timing(emc, rate); | ||
569 | struct emc_timing *last = &emc->last_timing; | ||
570 | enum emc_dll_change dll_change; | ||
571 | unsigned int pre_wait = 0; | ||
572 | u32 val, val2, mask; | ||
573 | bool update = false; | ||
574 | unsigned int i; | ||
575 | |||
576 | if (!timing) | ||
577 | return -ENOENT; | ||
578 | |||
579 | if ((last->emc_mode_1 & 0x1) == (timing->emc_mode_1 & 0x1)) | ||
580 | dll_change = DLL_CHANGE_NONE; | ||
581 | else if (timing->emc_mode_1 & 0x1) | ||
582 | dll_change = DLL_CHANGE_ON; | ||
583 | else | ||
584 | dll_change = DLL_CHANGE_OFF; | ||
585 | |||
586 | /* Clear CLKCHANGE_COMPLETE interrupts */ | ||
587 | writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, emc->regs + EMC_INTSTATUS); | ||
588 | |||
589 | /* Disable dynamic self-refresh */ | ||
590 | val = readl(emc->regs + EMC_CFG); | ||
591 | if (val & EMC_CFG_PWR_MASK) { | ||
592 | val &= ~EMC_CFG_POWER_FEATURES_MASK; | ||
593 | writel(val, emc->regs + EMC_CFG); | ||
594 | |||
595 | pre_wait = 5; | ||
596 | } | ||
597 | |||
598 | /* Disable SEL_DPD_CTRL for clock change */ | ||
599 | if (emc->dram_type == DRAM_TYPE_DDR3) | ||
600 | mask = EMC_SEL_DPD_CTRL_DDR3_MASK; | ||
601 | else | ||
602 | mask = EMC_SEL_DPD_CTRL_MASK; | ||
603 | |||
604 | val = readl(emc->regs + EMC_SEL_DPD_CTRL); | ||
605 | if (val & mask) { | ||
606 | val &= ~mask; | ||
607 | writel(val, emc->regs + EMC_SEL_DPD_CTRL); | ||
608 | } | ||
609 | |||
610 | /* Prepare DQ/DQS for clock change */ | ||
611 | val = readl(emc->regs + EMC_BGBIAS_CTL0); | ||
612 | val2 = last->emc_bgbias_ctl0; | ||
613 | if (!(timing->emc_bgbias_ctl0 & | ||
614 | EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_RX) && | ||
615 | (val & EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_RX)) { | ||
616 | val2 &= ~EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_RX; | ||
617 | update = true; | ||
618 | } | ||
619 | |||
620 | if ((val & EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD) || | ||
621 | (val & EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_VTTGEN)) { | ||
622 | update = true; | ||
623 | } | ||
624 | |||
625 | if (update) { | ||
626 | writel(val2, emc->regs + EMC_BGBIAS_CTL0); | ||
627 | if (pre_wait < 5) | ||
628 | pre_wait = 5; | ||
629 | } | ||
630 | |||
631 | update = false; | ||
632 | val = readl(emc->regs + EMC_XM2DQSPADCTRL2); | ||
633 | if (timing->emc_xm2dqspadctrl2 & EMC_XM2DQSPADCTRL2_VREF_ENABLE && | ||
634 | !(val & EMC_XM2DQSPADCTRL2_VREF_ENABLE)) { | ||
635 | val |= EMC_XM2DQSPADCTRL2_VREF_ENABLE; | ||
636 | update = true; | ||
637 | } | ||
638 | |||
639 | if (timing->emc_xm2dqspadctrl2 & EMC_XM2DQSPADCTRL2_RX_FT_REC_ENABLE && | ||
640 | !(val & EMC_XM2DQSPADCTRL2_RX_FT_REC_ENABLE)) { | ||
641 | val |= EMC_XM2DQSPADCTRL2_RX_FT_REC_ENABLE; | ||
642 | update = true; | ||
643 | } | ||
644 | |||
645 | if (update) { | ||
646 | writel(val, emc->regs + EMC_XM2DQSPADCTRL2); | ||
647 | if (pre_wait < 30) | ||
648 | pre_wait = 30; | ||
649 | } | ||
650 | |||
651 | /* Wait to settle */ | ||
652 | if (pre_wait) { | ||
653 | emc_seq_update_timing(emc); | ||
654 | udelay(pre_wait); | ||
655 | } | ||
656 | |||
657 | /* Program CTT_TERM control */ | ||
658 | if (last->emc_ctt_term_ctrl != timing->emc_ctt_term_ctrl) { | ||
659 | emc_seq_disable_auto_cal(emc); | ||
660 | writel(timing->emc_ctt_term_ctrl, | ||
661 | emc->regs + EMC_CTT_TERM_CTRL); | ||
662 | emc_seq_update_timing(emc); | ||
663 | } | ||
664 | |||
665 | /* Program burst shadow registers */ | ||
666 | for (i = 0; i < ARRAY_SIZE(timing->emc_burst_data); ++i) | ||
667 | writel(timing->emc_burst_data[i], | ||
668 | emc->regs + emc_burst_regs[i]); | ||
669 | |||
670 | writel(timing->emc_xm2dqspadctrl2, emc->regs + EMC_XM2DQSPADCTRL2); | ||
671 | writel(timing->emc_zcal_interval, emc->regs + EMC_ZCAL_INTERVAL); | ||
672 | |||
673 | tegra_mc_write_emem_configuration(emc->mc, timing->rate); | ||
674 | |||
675 | val = timing->emc_cfg & ~EMC_CFG_POWER_FEATURES_MASK; | ||
676 | emc_ccfifo_writel(emc, val, EMC_CFG); | ||
677 | |||
678 | /* Program AUTO_CAL_CONFIG */ | ||
679 | if (timing->emc_auto_cal_config2 != last->emc_auto_cal_config2) | ||
680 | emc_ccfifo_writel(emc, timing->emc_auto_cal_config2, | ||
681 | EMC_AUTO_CAL_CONFIG2); | ||
682 | |||
683 | if (timing->emc_auto_cal_config3 != last->emc_auto_cal_config3) | ||
684 | emc_ccfifo_writel(emc, timing->emc_auto_cal_config3, | ||
685 | EMC_AUTO_CAL_CONFIG3); | ||
686 | |||
687 | if (timing->emc_auto_cal_config != last->emc_auto_cal_config) { | ||
688 | val = timing->emc_auto_cal_config; | ||
689 | val &= EMC_AUTO_CAL_CONFIG_AUTO_CAL_START; | ||
690 | emc_ccfifo_writel(emc, val, EMC_AUTO_CAL_CONFIG); | ||
691 | } | ||
692 | |||
693 | /* DDR3: predict MRS long wait count */ | ||
694 | if (emc->dram_type == DRAM_TYPE_DDR3 && | ||
695 | dll_change == DLL_CHANGE_ON) { | ||
696 | u32 cnt = 512; | ||
697 | |||
698 | if (timing->emc_zcal_interval != 0 && | ||
699 | last->emc_zcal_interval == 0) | ||
700 | cnt -= emc->dram_num * 256; | ||
701 | |||
702 | val = (timing->emc_mrs_wait_cnt | ||
703 | & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) | ||
704 | >> EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT; | ||
705 | if (cnt < val) | ||
706 | cnt = val; | ||
707 | |||
708 | val = timing->emc_mrs_wait_cnt | ||
709 | & ~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK; | ||
710 | val |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) | ||
711 | & EMC_MRS_WAIT_CNT_LONG_WAIT_MASK; | ||
712 | |||
713 | writel(val, emc->regs + EMC_MRS_WAIT_CNT); | ||
714 | } | ||
715 | |||
716 | val = timing->emc_cfg_2; | ||
717 | val &= ~EMC_CFG_2_DIS_STP_OB_CLK_DURING_NON_WR; | ||
718 | emc_ccfifo_writel(emc, val, EMC_CFG_2); | ||
719 | |||
720 | /* DDR3: Turn off DLL and enter self-refresh */ | ||
721 | if (emc->dram_type == DRAM_TYPE_DDR3 && dll_change == DLL_CHANGE_OFF) | ||
722 | emc_ccfifo_writel(emc, timing->emc_mode_1, EMC_EMRS); | ||
723 | |||
724 | /* Disable refresh controller */ | ||
725 | emc_ccfifo_writel(emc, EMC_REFCTRL_DEV_SEL(emc->dram_num), | ||
726 | EMC_REFCTRL); | ||
727 | if (emc->dram_type == DRAM_TYPE_DDR3) | ||
728 | emc_ccfifo_writel(emc, EMC_DRAM_DEV_SEL(emc->dram_num) | | ||
729 | EMC_SELF_REF_CMD_ENABLED, | ||
730 | EMC_SELF_REF); | ||
731 | |||
732 | /* Flow control marker */ | ||
733 | emc_ccfifo_writel(emc, 1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE); | ||
734 | |||
735 | /* DDR3: Exit self-refresh */ | ||
736 | if (emc->dram_type == DRAM_TYPE_DDR3) | ||
737 | emc_ccfifo_writel(emc, EMC_DRAM_DEV_SEL(emc->dram_num), | ||
738 | EMC_SELF_REF); | ||
739 | emc_ccfifo_writel(emc, EMC_REFCTRL_DEV_SEL(emc->dram_num) | | ||
740 | EMC_REFCTRL_ENABLE, | ||
741 | EMC_REFCTRL); | ||
742 | |||
743 | /* Set DRAM mode registers */ | ||
744 | if (emc->dram_type == DRAM_TYPE_DDR3) { | ||
745 | if (timing->emc_mode_1 != last->emc_mode_1) | ||
746 | emc_ccfifo_writel(emc, timing->emc_mode_1, EMC_EMRS); | ||
747 | if (timing->emc_mode_2 != last->emc_mode_2) | ||
748 | emc_ccfifo_writel(emc, timing->emc_mode_2, EMC_EMRS2); | ||
749 | |||
750 | if ((timing->emc_mode_reset != last->emc_mode_reset) || | ||
751 | dll_change == DLL_CHANGE_ON) { | ||
752 | val = timing->emc_mode_reset; | ||
753 | if (dll_change == DLL_CHANGE_ON) { | ||
754 | val |= EMC_MODE_SET_DLL_RESET; | ||
755 | val |= EMC_MODE_SET_LONG_CNT; | ||
756 | } else { | ||
757 | val &= ~EMC_MODE_SET_DLL_RESET; | ||
758 | } | ||
759 | emc_ccfifo_writel(emc, val, EMC_MRS); | ||
760 | } | ||
761 | } else { | ||
762 | if (timing->emc_mode_2 != last->emc_mode_2) | ||
763 | emc_ccfifo_writel(emc, timing->emc_mode_2, EMC_MRW2); | ||
764 | if (timing->emc_mode_1 != last->emc_mode_1) | ||
765 | emc_ccfifo_writel(emc, timing->emc_mode_1, EMC_MRW); | ||
766 | if (timing->emc_mode_4 != last->emc_mode_4) | ||
767 | emc_ccfifo_writel(emc, timing->emc_mode_4, EMC_MRW4); | ||
768 | } | ||
769 | |||
770 | /* Issue ZCAL command if turning ZCAL on */ | ||
771 | if (timing->emc_zcal_interval != 0 && last->emc_zcal_interval == 0) { | ||
772 | emc_ccfifo_writel(emc, EMC_ZQ_CAL_LONG_CMD_DEV0, EMC_ZQ_CAL); | ||
773 | if (emc->dram_num > 1) | ||
774 | emc_ccfifo_writel(emc, EMC_ZQ_CAL_LONG_CMD_DEV1, | ||
775 | EMC_ZQ_CAL); | ||
776 | } | ||
777 | |||
778 | /* Write to RO register to remove stall after change */ | ||
779 | emc_ccfifo_writel(emc, 0, EMC_CCFIFO_STATUS); | ||
780 | |||
781 | if (timing->emc_cfg_2 & EMC_CFG_2_DIS_STP_OB_CLK_DURING_NON_WR) | ||
782 | emc_ccfifo_writel(emc, timing->emc_cfg_2, EMC_CFG_2); | ||
783 | |||
784 | /* Disable AUTO_CAL for clock change */ | ||
785 | emc_seq_disable_auto_cal(emc); | ||
786 | |||
787 | /* Read register to wait until programming has settled */ | ||
788 | readl(emc->regs + EMC_INTSTATUS); | ||
789 | |||
790 | return 0; | ||
791 | } | ||
792 | |||
793 | void tegra_emc_complete_timing_change(struct tegra_emc *emc, | ||
794 | unsigned long rate) | ||
795 | { | ||
796 | struct emc_timing *timing = tegra_emc_find_timing(emc, rate); | ||
797 | struct emc_timing *last = &emc->last_timing; | ||
798 | u32 val; | ||
799 | |||
800 | if (!timing) | ||
801 | return; | ||
802 | |||
803 | /* Wait until the state machine has settled */ | ||
804 | emc_seq_wait_clkchange(emc); | ||
805 | |||
806 | /* Restore AUTO_CAL */ | ||
807 | if (timing->emc_ctt_term_ctrl != last->emc_ctt_term_ctrl) | ||
808 | writel(timing->emc_auto_cal_interval, | ||
809 | emc->regs + EMC_AUTO_CAL_INTERVAL); | ||
810 | |||
811 | /* Restore dynamic self-refresh */ | ||
812 | if (timing->emc_cfg & EMC_CFG_PWR_MASK) | ||
813 | writel(timing->emc_cfg, emc->regs + EMC_CFG); | ||
814 | |||
815 | /* Set ZCAL wait count */ | ||
816 | writel(timing->emc_zcal_cnt_long, emc->regs + EMC_ZCAL_WAIT_CNT); | ||
817 | |||
818 | /* LPDDR3: Turn off BGBIAS if low frequency */ | ||
819 | if (emc->dram_type == DRAM_TYPE_LPDDR3 && | ||
820 | timing->emc_bgbias_ctl0 & | ||
821 | EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_RX) { | ||
822 | val = timing->emc_bgbias_ctl0; | ||
823 | val |= EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_VTTGEN; | ||
824 | val |= EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD; | ||
825 | writel(val, emc->regs + EMC_BGBIAS_CTL0); | ||
826 | } else { | ||
827 | if (emc->dram_type == DRAM_TYPE_DDR3 && | ||
828 | readl(emc->regs + EMC_BGBIAS_CTL0) != | ||
829 | timing->emc_bgbias_ctl0) { | ||
830 | writel(timing->emc_bgbias_ctl0, | ||
831 | emc->regs + EMC_BGBIAS_CTL0); | ||
832 | } | ||
833 | |||
834 | writel(timing->emc_auto_cal_interval, | ||
835 | emc->regs + EMC_AUTO_CAL_INTERVAL); | ||
836 | } | ||
837 | |||
838 | /* Wait for timing to settle */ | ||
839 | udelay(2); | ||
840 | |||
841 | /* Reprogram SEL_DPD_CTRL */ | ||
842 | writel(timing->emc_sel_dpd_ctrl, emc->regs + EMC_SEL_DPD_CTRL); | ||
843 | emc_seq_update_timing(emc); | ||
844 | |||
845 | emc->last_timing = *timing; | ||
846 | } | ||
847 | |||
848 | /* Initialization and deinitialization */ | ||
849 | |||
850 | static void emc_read_current_timing(struct tegra_emc *emc, | ||
851 | struct emc_timing *timing) | ||
852 | { | ||
853 | unsigned int i; | ||
854 | |||
855 | for (i = 0; i < ARRAY_SIZE(emc_burst_regs); ++i) | ||
856 | timing->emc_burst_data[i] = | ||
857 | readl(emc->regs + emc_burst_regs[i]); | ||
858 | |||
859 | timing->emc_cfg = readl(emc->regs + EMC_CFG); | ||
860 | |||
861 | timing->emc_auto_cal_interval = 0; | ||
862 | timing->emc_zcal_cnt_long = 0; | ||
863 | timing->emc_mode_1 = 0; | ||
864 | timing->emc_mode_2 = 0; | ||
865 | timing->emc_mode_4 = 0; | ||
866 | timing->emc_mode_reset = 0; | ||
867 | } | ||
868 | |||
869 | static int emc_init(struct tegra_emc *emc) | ||
870 | { | ||
871 | emc->dram_type = readl(emc->regs + EMC_FBIO_CFG5); | ||
872 | emc->dram_type &= EMC_FBIO_CFG5_DRAM_TYPE_MASK; | ||
873 | emc->dram_type >>= EMC_FBIO_CFG5_DRAM_TYPE_SHIFT; | ||
874 | |||
875 | emc->dram_num = tegra_mc_get_emem_device_count(emc->mc); | ||
876 | |||
877 | emc_read_current_timing(emc, &emc->last_timing); | ||
878 | |||
879 | return 0; | ||
880 | } | ||
881 | |||
882 | static int load_one_timing_from_dt(struct tegra_emc *emc, | ||
883 | struct emc_timing *timing, | ||
884 | struct device_node *node) | ||
885 | { | ||
886 | u32 value; | ||
887 | int err; | ||
888 | |||
889 | err = of_property_read_u32(node, "clock-frequency", &value); | ||
890 | if (err) { | ||
891 | dev_err(emc->dev, "timing %s: failed to read rate: %d\n", | ||
892 | node->name, err); | ||
893 | return err; | ||
894 | } | ||
895 | |||
896 | timing->rate = value; | ||
897 | |||
898 | err = of_property_read_u32_array(node, "nvidia,emc-configuration", | ||
899 | timing->emc_burst_data, | ||
900 | ARRAY_SIZE(timing->emc_burst_data)); | ||
901 | if (err) { | ||
902 | dev_err(emc->dev, | ||
903 | "timing %s: failed to read emc burst data: %d\n", | ||
904 | node->name, err); | ||
905 | return err; | ||
906 | } | ||
907 | |||
908 | #define EMC_READ_PROP(prop, dtprop) { \ | ||
909 | err = of_property_read_u32(node, dtprop, &timing->prop); \ | ||
910 | if (err) { \ | ||
911 | dev_err(emc->dev, "timing %s: failed to read " #prop ": %d\n", \ | ||
912 | node->name, err); \ | ||
913 | return err; \ | ||
914 | } \ | ||
915 | } | ||
916 | |||
917 | EMC_READ_PROP(emc_auto_cal_config, "nvidia,emc-auto-cal-config") | ||
918 | EMC_READ_PROP(emc_auto_cal_config2, "nvidia,emc-auto-cal-config2") | ||
919 | EMC_READ_PROP(emc_auto_cal_config3, "nvidia,emc-auto-cal-config3") | ||
920 | EMC_READ_PROP(emc_auto_cal_interval, "nvidia,emc-auto-cal-interval") | ||
921 | EMC_READ_PROP(emc_bgbias_ctl0, "nvidia,emc-bgbias-ctl0") | ||
922 | EMC_READ_PROP(emc_cfg, "nvidia,emc-cfg") | ||
923 | EMC_READ_PROP(emc_cfg_2, "nvidia,emc-cfg-2") | ||
924 | EMC_READ_PROP(emc_ctt_term_ctrl, "nvidia,emc-ctt-term-ctrl") | ||
925 | EMC_READ_PROP(emc_mode_1, "nvidia,emc-mode-1") | ||
926 | EMC_READ_PROP(emc_mode_2, "nvidia,emc-mode-2") | ||
927 | EMC_READ_PROP(emc_mode_4, "nvidia,emc-mode-4") | ||
928 | EMC_READ_PROP(emc_mode_reset, "nvidia,emc-mode-reset") | ||
929 | EMC_READ_PROP(emc_mrs_wait_cnt, "nvidia,emc-mrs-wait-cnt") | ||
930 | EMC_READ_PROP(emc_sel_dpd_ctrl, "nvidia,emc-sel-dpd-ctrl") | ||
931 | EMC_READ_PROP(emc_xm2dqspadctrl2, "nvidia,emc-xm2dqspadctrl2") | ||
932 | EMC_READ_PROP(emc_zcal_cnt_long, "nvidia,emc-zcal-cnt-long") | ||
933 | EMC_READ_PROP(emc_zcal_interval, "nvidia,emc-zcal-interval") | ||
934 | |||
935 | #undef EMC_READ_PROP | ||
936 | |||
937 | return 0; | ||
938 | } | ||
939 | |||
940 | static int cmp_timings(const void *_a, const void *_b) | ||
941 | { | ||
942 | const struct emc_timing *a = _a; | ||
943 | const struct emc_timing *b = _b; | ||
944 | |||
945 | if (a->rate < b->rate) | ||
946 | return -1; | ||
947 | else if (a->rate == b->rate) | ||
948 | return 0; | ||
949 | else | ||
950 | return 1; | ||
951 | } | ||
952 | |||
953 | static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc, | ||
954 | struct device_node *node) | ||
955 | { | ||
956 | int child_count = of_get_child_count(node); | ||
957 | struct device_node *child; | ||
958 | struct emc_timing *timing; | ||
959 | unsigned int i = 0; | ||
960 | int err; | ||
961 | |||
962 | emc->timings = devm_kcalloc(emc->dev, child_count, sizeof(*timing), | ||
963 | GFP_KERNEL); | ||
964 | if (!emc->timings) | ||
965 | return -ENOMEM; | ||
966 | |||
967 | emc->num_timings = child_count; | ||
968 | |||
969 | for_each_child_of_node(node, child) { | ||
970 | timing = &emc->timings[i++]; | ||
971 | |||
972 | err = load_one_timing_from_dt(emc, timing, child); | ||
973 | if (err) | ||
974 | return err; | ||
975 | } | ||
976 | |||
977 | sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings, | ||
978 | NULL); | ||
979 | |||
980 | return 0; | ||
981 | } | ||
982 | |||
983 | static const struct of_device_id tegra_emc_of_match[] = { | ||
984 | { .compatible = "nvidia,tegra124-emc" }, | ||
985 | {} | ||
986 | }; | ||
987 | |||
988 | static struct device_node * | ||
989 | tegra_emc_find_node_by_ram_code(struct device_node *node, u32 ram_code) | ||
990 | { | ||
991 | struct device_node *np; | ||
992 | int err; | ||
993 | |||
994 | for_each_child_of_node(node, np) { | ||
995 | u32 value; | ||
996 | |||
997 | err = of_property_read_u32(np, "nvidia,ram-code", &value); | ||
998 | if (err || (value != ram_code)) { | ||
999 | of_node_put(np); | ||
1000 | continue; | ||
1001 | } | ||
1002 | |||
1003 | return np; | ||
1004 | } | ||
1005 | |||
1006 | return NULL; | ||
1007 | } | ||
1008 | |||
1009 | /* Debugfs entry */ | ||
1010 | |||
1011 | static int emc_debug_rate_get(void *data, u64 *rate) | ||
1012 | { | ||
1013 | struct clk *c = data; | ||
1014 | |||
1015 | *rate = clk_get_rate(c); | ||
1016 | |||
1017 | return 0; | ||
1018 | } | ||
1019 | |||
1020 | static int emc_debug_rate_set(void *data, u64 rate) | ||
1021 | { | ||
1022 | struct clk *c = data; | ||
1023 | |||
1024 | return clk_set_rate(c, rate); | ||
1025 | } | ||
1026 | |||
1027 | DEFINE_SIMPLE_ATTRIBUTE(emc_debug_rate_fops, emc_debug_rate_get, | ||
1028 | emc_debug_rate_set, "%lld\n"); | ||
1029 | |||
1030 | static void emc_debugfs_init(struct device *dev) | ||
1031 | { | ||
1032 | struct dentry *root, *file; | ||
1033 | struct clk *clk; | ||
1034 | |||
1035 | root = debugfs_create_dir("emc", NULL); | ||
1036 | if (!root) { | ||
1037 | dev_err(dev, "failed to create debugfs directory\n"); | ||
1038 | return; | ||
1039 | } | ||
1040 | |||
1041 | clk = clk_get_sys("tegra-clk-debug", "emc"); | ||
1042 | if (IS_ERR(clk)) { | ||
1043 | dev_err(dev, "failed to get debug clock: %ld\n", PTR_ERR(clk)); | ||
1044 | return; | ||
1045 | } | ||
1046 | |||
1047 | file = debugfs_create_file("rate", S_IRUGO | S_IWUSR, root, clk, | ||
1048 | &emc_debug_rate_fops); | ||
1049 | if (!file) | ||
1050 | dev_err(dev, "failed to create debugfs entry\n"); | ||
1051 | } | ||
1052 | |||
1053 | static int tegra_emc_probe(struct platform_device *pdev) | ||
1054 | { | ||
1055 | struct platform_device *mc; | ||
1056 | struct device_node *np; | ||
1057 | struct tegra_emc *emc; | ||
1058 | struct resource *res; | ||
1059 | u32 ram_code; | ||
1060 | int err; | ||
1061 | |||
1062 | emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL); | ||
1063 | if (!emc) | ||
1064 | return -ENOMEM; | ||
1065 | |||
1066 | emc->dev = &pdev->dev; | ||
1067 | |||
1068 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1069 | emc->regs = devm_ioremap_resource(&pdev->dev, res); | ||
1070 | if (IS_ERR(emc->regs)) | ||
1071 | return PTR_ERR(emc->regs); | ||
1072 | |||
1073 | np = of_parse_phandle(pdev->dev.of_node, "nvidia,memory-controller", 0); | ||
1074 | if (!np) { | ||
1075 | dev_err(&pdev->dev, "could not get memory controller\n"); | ||
1076 | return -ENOENT; | ||
1077 | } | ||
1078 | |||
1079 | mc = of_find_device_by_node(np); | ||
1080 | if (!mc) | ||
1081 | return -ENOENT; | ||
1082 | |||
1083 | of_node_put(np); | ||
1084 | |||
1085 | emc->mc = platform_get_drvdata(mc); | ||
1086 | if (!emc->mc) | ||
1087 | return -EPROBE_DEFER; | ||
1088 | |||
1089 | ram_code = tegra_read_ram_code(); | ||
1090 | |||
1091 | np = tegra_emc_find_node_by_ram_code(pdev->dev.of_node, ram_code); | ||
1092 | if (!np) { | ||
1093 | dev_err(&pdev->dev, | ||
1094 | "no memory timings for RAM code %u found in DT\n", | ||
1095 | ram_code); | ||
1096 | return -ENOENT; | ||
1097 | } | ||
1098 | |||
1099 | err = tegra_emc_load_timings_from_dt(emc, np); | ||
1100 | |||
1101 | of_node_put(np); | ||
1102 | |||
1103 | if (err) | ||
1104 | return err; | ||
1105 | |||
1106 | if (emc->num_timings == 0) { | ||
1107 | dev_err(&pdev->dev, | ||
1108 | "no memory timings for RAM code %u registered\n", | ||
1109 | ram_code); | ||
1110 | return -ENOENT; | ||
1111 | } | ||
1112 | |||
1113 | err = emc_init(emc); | ||
1114 | if (err) { | ||
1115 | dev_err(&pdev->dev, "EMC initialization failed: %d\n", err); | ||
1116 | return err; | ||
1117 | } | ||
1118 | |||
1119 | platform_set_drvdata(pdev, emc); | ||
1120 | |||
1121 | if (IS_ENABLED(CONFIG_DEBUG_FS)) | ||
1122 | emc_debugfs_init(&pdev->dev); | ||
1123 | |||
1124 | return 0; | ||
1125 | }; | ||
1126 | |||
1127 | static struct platform_driver tegra_emc_driver = { | ||
1128 | .probe = tegra_emc_probe, | ||
1129 | .driver = { | ||
1130 | .name = "tegra-emc", | ||
1131 | .of_match_table = tegra_emc_of_match, | ||
1132 | .suppress_bind_attrs = true, | ||
1133 | }, | ||
1134 | }; | ||
1135 | |||
1136 | static int tegra_emc_init(void) | ||
1137 | { | ||
1138 | return platform_driver_register(&tegra_emc_driver); | ||
1139 | } | ||
1140 | subsys_initcall(tegra_emc_init); | ||
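The new EMC driver splits a frequency change into two exported phases: tegra_emc_prepare_timing_change() programs the burst/shadow registers and queues the CCFIFO command sequence, while tegra_emc_complete_timing_change() waits for CLKCHANGE_COMPLETE and restores auto-calibration and the power features. The clock switch itself belongs to the Tegra EMC clock driver, which is not part of this patch, so the caller below is only a simplified sketch of the expected sequence; the emc pointer and emc_clk handle are assumptions.

  #include <linux/clk.h>
  #include <soc/tegra/emc.h>

  /* Sketch: how a clock-side caller is expected to bracket the rate switch. */
  static int emc_set_rate_sketch(struct tegra_emc *emc, struct clk *emc_clk,
                                 unsigned long rate)
  {
          int err;

          err = tegra_emc_prepare_timing_change(emc, rate); /* shadow regs + CCFIFO */
          if (err)
                  return err;

          err = clk_set_rate(emc_clk, rate);      /* hardware switch, external to this file */
          if (err)
                  return err;

          tegra_emc_complete_timing_change(emc, rate);    /* wait + restore features */

          return 0;
  }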
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c index 278d40b854c1..966e1557e6f4 100644 --- a/drivers/memory/tegra/tegra124.c +++ b/drivers/memory/tegra/tegra124.c | |||
@@ -15,6 +15,48 @@ | |||
15 | 15 | ||
16 | #include "mc.h" | 16 | #include "mc.h" |
17 | 17 | ||
18 | #define MC_EMEM_ARB_CFG 0x90 | ||
19 | #define MC_EMEM_ARB_OUTSTANDING_REQ 0x94 | ||
20 | #define MC_EMEM_ARB_TIMING_RCD 0x98 | ||
21 | #define MC_EMEM_ARB_TIMING_RP 0x9c | ||
22 | #define MC_EMEM_ARB_TIMING_RC 0xa0 | ||
23 | #define MC_EMEM_ARB_TIMING_RAS 0xa4 | ||
24 | #define MC_EMEM_ARB_TIMING_FAW 0xa8 | ||
25 | #define MC_EMEM_ARB_TIMING_RRD 0xac | ||
26 | #define MC_EMEM_ARB_TIMING_RAP2PRE 0xb0 | ||
27 | #define MC_EMEM_ARB_TIMING_WAP2PRE 0xb4 | ||
28 | #define MC_EMEM_ARB_TIMING_R2R 0xb8 | ||
29 | #define MC_EMEM_ARB_TIMING_W2W 0xbc | ||
30 | #define MC_EMEM_ARB_TIMING_R2W 0xc0 | ||
31 | #define MC_EMEM_ARB_TIMING_W2R 0xc4 | ||
32 | #define MC_EMEM_ARB_DA_TURNS 0xd0 | ||
33 | #define MC_EMEM_ARB_DA_COVERS 0xd4 | ||
34 | #define MC_EMEM_ARB_MISC0 0xd8 | ||
35 | #define MC_EMEM_ARB_MISC1 0xdc | ||
36 | #define MC_EMEM_ARB_RING1_THROTTLE 0xe0 | ||
37 | |||
38 | static const unsigned long tegra124_mc_emem_regs[] = { | ||
39 | MC_EMEM_ARB_CFG, | ||
40 | MC_EMEM_ARB_OUTSTANDING_REQ, | ||
41 | MC_EMEM_ARB_TIMING_RCD, | ||
42 | MC_EMEM_ARB_TIMING_RP, | ||
43 | MC_EMEM_ARB_TIMING_RC, | ||
44 | MC_EMEM_ARB_TIMING_RAS, | ||
45 | MC_EMEM_ARB_TIMING_FAW, | ||
46 | MC_EMEM_ARB_TIMING_RRD, | ||
47 | MC_EMEM_ARB_TIMING_RAP2PRE, | ||
48 | MC_EMEM_ARB_TIMING_WAP2PRE, | ||
49 | MC_EMEM_ARB_TIMING_R2R, | ||
50 | MC_EMEM_ARB_TIMING_W2W, | ||
51 | MC_EMEM_ARB_TIMING_R2W, | ||
52 | MC_EMEM_ARB_TIMING_W2R, | ||
53 | MC_EMEM_ARB_DA_TURNS, | ||
54 | MC_EMEM_ARB_DA_COVERS, | ||
55 | MC_EMEM_ARB_MISC0, | ||
56 | MC_EMEM_ARB_MISC1, | ||
57 | MC_EMEM_ARB_RING1_THROTTLE | ||
58 | }; | ||
59 | |||
18 | static const struct tegra_mc_client tegra124_mc_clients[] = { | 60 | static const struct tegra_mc_client tegra124_mc_clients[] = { |
19 | { | 61 | { |
20 | .id = 0x00, | 62 | .id = 0x00, |
@@ -934,29 +976,29 @@ static const struct tegra_mc_client tegra124_mc_clients[] = { | |||
934 | }; | 976 | }; |
935 | 977 | ||
936 | static const struct tegra_smmu_swgroup tegra124_swgroups[] = { | 978 | static const struct tegra_smmu_swgroup tegra124_swgroups[] = { |
937 | { .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 }, | 979 | { .name = "dc", .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 }, |
938 | { .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 }, | 980 | { .name = "dcb", .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 }, |
939 | { .swgroup = TEGRA_SWGROUP_AFI, .reg = 0x238 }, | 981 | { .name = "afi", .swgroup = TEGRA_SWGROUP_AFI, .reg = 0x238 }, |
940 | { .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c }, | 982 | { .name = "avpc", .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c }, |
941 | { .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 }, | 983 | { .name = "hda", .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 }, |
942 | { .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 }, | 984 | { .name = "hc", .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 }, |
943 | { .swgroup = TEGRA_SWGROUP_MSENC, .reg = 0x264 }, | 985 | { .name = "msenc", .swgroup = TEGRA_SWGROUP_MSENC, .reg = 0x264 }, |
944 | { .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 }, | 986 | { .name = "ppcs", .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 }, |
945 | { .swgroup = TEGRA_SWGROUP_SATA, .reg = 0x274 }, | 987 | { .name = "sata", .swgroup = TEGRA_SWGROUP_SATA, .reg = 0x274 }, |
946 | { .swgroup = TEGRA_SWGROUP_VDE, .reg = 0x27c }, | 988 | { .name = "vde", .swgroup = TEGRA_SWGROUP_VDE, .reg = 0x27c }, |
947 | { .swgroup = TEGRA_SWGROUP_ISP2, .reg = 0x258 }, | 989 | { .name = "isp2", .swgroup = TEGRA_SWGROUP_ISP2, .reg = 0x258 }, |
948 | { .swgroup = TEGRA_SWGROUP_XUSB_HOST, .reg = 0x288 }, | 990 | { .name = "xusb_host", .swgroup = TEGRA_SWGROUP_XUSB_HOST, .reg = 0x288 }, |
949 | { .swgroup = TEGRA_SWGROUP_XUSB_DEV, .reg = 0x28c }, | 991 | { .name = "xusb_dev", .swgroup = TEGRA_SWGROUP_XUSB_DEV, .reg = 0x28c }, |
950 | { .swgroup = TEGRA_SWGROUP_ISP2B, .reg = 0xaa4 }, | 992 | { .name = "isp2b", .swgroup = TEGRA_SWGROUP_ISP2B, .reg = 0xaa4 }, |
951 | { .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 }, | 993 | { .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 }, |
952 | { .swgroup = TEGRA_SWGROUP_A9AVP, .reg = 0x290 }, | 994 | { .name = "a9avp", .swgroup = TEGRA_SWGROUP_A9AVP, .reg = 0x290 }, |
953 | { .swgroup = TEGRA_SWGROUP_GPU, .reg = 0xaac }, | 995 | { .name = "gpu", .swgroup = TEGRA_SWGROUP_GPU, .reg = 0xaac }, |
954 | { .swgroup = TEGRA_SWGROUP_SDMMC1A, .reg = 0xa94 }, | 996 | { .name = "sdmmc1a", .swgroup = TEGRA_SWGROUP_SDMMC1A, .reg = 0xa94 }, |
955 | { .swgroup = TEGRA_SWGROUP_SDMMC2A, .reg = 0xa98 }, | 997 | { .name = "sdmmc2a", .swgroup = TEGRA_SWGROUP_SDMMC2A, .reg = 0xa98 }, |
956 | { .swgroup = TEGRA_SWGROUP_SDMMC3A, .reg = 0xa9c }, | 998 | { .name = "sdmmc3a", .swgroup = TEGRA_SWGROUP_SDMMC3A, .reg = 0xa9c }, |
957 | { .swgroup = TEGRA_SWGROUP_SDMMC4A, .reg = 0xaa0 }, | 999 | { .name = "sdmmc4a", .swgroup = TEGRA_SWGROUP_SDMMC4A, .reg = 0xaa0 }, |
958 | { .swgroup = TEGRA_SWGROUP_VIC, .reg = 0x284 }, | 1000 | { .name = "vic", .swgroup = TEGRA_SWGROUP_VIC, .reg = 0x284 }, |
959 | { .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 }, | 1001 | { .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 }, |
960 | }; | 1002 | }; |
961 | 1003 | ||
962 | #ifdef CONFIG_ARCH_TEGRA_124_SOC | 1004 | #ifdef CONFIG_ARCH_TEGRA_124_SOC |
@@ -991,5 +1033,40 @@ const struct tegra_mc_soc tegra124_mc_soc = { | |||
991 | .num_address_bits = 34, | 1033 | .num_address_bits = 34, |
992 | .atom_size = 32, | 1034 | .atom_size = 32, |
993 | .smmu = &tegra124_smmu_soc, | 1035 | .smmu = &tegra124_smmu_soc, |
1036 | .emem_regs = tegra124_mc_emem_regs, | ||
1037 | .num_emem_regs = ARRAY_SIZE(tegra124_mc_emem_regs), | ||
994 | }; | 1038 | }; |
995 | #endif /* CONFIG_ARCH_TEGRA_124_SOC */ | 1039 | #endif /* CONFIG_ARCH_TEGRA_124_SOC */ |
1040 | |||
1041 | #ifdef CONFIG_ARCH_TEGRA_132_SOC | ||
1042 | static void tegra132_flush_dcache(struct page *page, unsigned long offset, | ||
1043 | size_t size) | ||
1044 | { | ||
1045 | void *virt = page_address(page) + offset; | ||
1046 | |||
1047 | __flush_dcache_area(virt, size); | ||
1048 | } | ||
1049 | |||
1050 | static const struct tegra_smmu_ops tegra132_smmu_ops = { | ||
1051 | .flush_dcache = tegra132_flush_dcache, | ||
1052 | }; | ||
1053 | |||
1054 | static const struct tegra_smmu_soc tegra132_smmu_soc = { | ||
1055 | .clients = tegra124_mc_clients, | ||
1056 | .num_clients = ARRAY_SIZE(tegra124_mc_clients), | ||
1057 | .swgroups = tegra124_swgroups, | ||
1058 | .num_swgroups = ARRAY_SIZE(tegra124_swgroups), | ||
1059 | .supports_round_robin_arbitration = true, | ||
1060 | .supports_request_limit = true, | ||
1061 | .num_asids = 128, | ||
1062 | .ops = &tegra132_smmu_ops, | ||
1063 | }; | ||
1064 | |||
1065 | const struct tegra_mc_soc tegra132_mc_soc = { | ||
1066 | .clients = tegra124_mc_clients, | ||
1067 | .num_clients = ARRAY_SIZE(tegra124_mc_clients), | ||
1068 | .num_address_bits = 34, | ||
1069 | .atom_size = 32, | ||
1070 | .smmu = &tegra132_smmu_soc, | ||
1071 | }; | ||
1072 | #endif /* CONFIG_ARCH_TEGRA_132_SOC */ | ||
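tegra124_mc_emem_regs lists the EMEM arbitration registers whose values depend on the memory frequency; the EMC driver triggers the update through tegra_mc_write_emem_configuration(emc->mc, timing->rate). The mc.c side of that call is not included in this excerpt, so the following is only a sketch of its presumed shape under that assumption: find the MC timing matching the requested rate and write its per-register values over the soc->emem_regs list.

  /* Presumed shape of the mc.c helper (not part of this diff); field names are assumptions. */
  static void write_emem_configuration_sketch(struct tegra_mc *mc, unsigned long rate)
  {
          const struct tegra_mc_timing *timing = NULL;
          unsigned int i;

          for (i = 0; i < mc->num_timings; i++) {
                  if (mc->timings[i].rate == rate) {
                          timing = &mc->timings[i];
                          break;
                  }
          }

          if (!timing)
                  return;

          for (i = 0; i < mc->soc->num_emem_regs; i++)
                  mc_writel(mc, timing->emem_data[i], mc->soc->emem_regs[i]);
  }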
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c index 71fe9376fe53..1abcd8f6f3ba 100644 --- a/drivers/memory/tegra/tegra30.c +++ b/drivers/memory/tegra/tegra30.c | |||
@@ -918,22 +918,22 @@ static const struct tegra_mc_client tegra30_mc_clients[] = { | |||
918 | }; | 918 | }; |
919 | 919 | ||
920 | static const struct tegra_smmu_swgroup tegra30_swgroups[] = { | 920 | static const struct tegra_smmu_swgroup tegra30_swgroups[] = { |
921 | { .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 }, | 921 | { .name = "dc", .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 }, |
922 | { .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 }, | 922 | { .name = "dcb", .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 }, |
923 | { .swgroup = TEGRA_SWGROUP_EPP, .reg = 0x248 }, | 923 | { .name = "epp", .swgroup = TEGRA_SWGROUP_EPP, .reg = 0x248 }, |
924 | { .swgroup = TEGRA_SWGROUP_G2, .reg = 0x24c }, | 924 | { .name = "g2", .swgroup = TEGRA_SWGROUP_G2, .reg = 0x24c }, |
925 | { .swgroup = TEGRA_SWGROUP_MPE, .reg = 0x264 }, | 925 | { .name = "mpe", .swgroup = TEGRA_SWGROUP_MPE, .reg = 0x264 }, |
926 | { .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 }, | 926 | { .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 }, |
927 | { .swgroup = TEGRA_SWGROUP_AFI, .reg = 0x238 }, | 927 | { .name = "afi", .swgroup = TEGRA_SWGROUP_AFI, .reg = 0x238 }, |
928 | { .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c }, | 928 | { .name = "avpc", .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c }, |
929 | { .swgroup = TEGRA_SWGROUP_NV, .reg = 0x268 }, | 929 | { .name = "nv", .swgroup = TEGRA_SWGROUP_NV, .reg = 0x268 }, |
930 | { .swgroup = TEGRA_SWGROUP_NV2, .reg = 0x26c }, | 930 | { .name = "nv2", .swgroup = TEGRA_SWGROUP_NV2, .reg = 0x26c }, |
931 | { .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 }, | 931 | { .name = "hda", .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 }, |
932 | { .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 }, | 932 | { .name = "hc", .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 }, |
933 | { .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 }, | 933 | { .name = "ppcs", .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 }, |
934 | { .swgroup = TEGRA_SWGROUP_SATA, .reg = 0x278 }, | 934 | { .name = "sata", .swgroup = TEGRA_SWGROUP_SATA, .reg = 0x278 }, |
935 | { .swgroup = TEGRA_SWGROUP_VDE, .reg = 0x27c }, | 935 | { .name = "vde", .swgroup = TEGRA_SWGROUP_VDE, .reg = 0x27c }, |
936 | { .swgroup = TEGRA_SWGROUP_ISP, .reg = 0x258 }, | 936 | { .name = "isp", .swgroup = TEGRA_SWGROUP_ISP, .reg = 0x258 }, |
937 | }; | 937 | }; |
938 | 938 | ||
939 | static void tegra30_flush_dcache(struct page *page, unsigned long offset, | 939 | static void tegra30_flush_dcache(struct page *page, unsigned long offset, |
diff --git a/drivers/of/platform.c b/drivers/of/platform.c index a01f57c9e34e..ddf8e42c9367 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c | |||
@@ -25,6 +25,7 @@ | |||
25 | 25 | ||
26 | const struct of_device_id of_default_bus_match_table[] = { | 26 | const struct of_device_id of_default_bus_match_table[] = { |
27 | { .compatible = "simple-bus", }, | 27 | { .compatible = "simple-bus", }, |
28 | { .compatible = "simple-mfd", }, | ||
28 | #ifdef CONFIG_ARM_AMBA | 29 | #ifdef CONFIG_ARM_AMBA |
29 | { .compatible = "arm,amba-bus", }, | 30 | { .compatible = "arm,amba-bus", }, |
30 | #endif /* CONFIG_ARM_AMBA */ | 31 | #endif /* CONFIG_ARM_AMBA */ |
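Adding "simple-mfd" to of_default_bus_match_table means that children of such a node are registered as platform devices during the default bus scan, exactly like "simple-bus" children; this is what lets the Berlin chip-ctrl sub-devices below probe without a dedicated MFD driver. A small sketch of the scan that consumes this table, assuming the usual of_platform_populate() call made by platform setup code (not part of this hunk):

  #include <linux/of_platform.h>

  /* Sketch: the default DT bus scan now also descends into "simple-mfd" nodes. */
  static int populate_default_buses(struct device_node *root)
  {
          return of_platform_populate(root, of_default_bus_match_table, NULL, NULL);
  }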
diff --git a/drivers/pinctrl/berlin/berlin-bg2.c b/drivers/pinctrl/berlin/berlin-bg2.c index b71a6fffef1b..3769eaedf519 100644 --- a/drivers/pinctrl/berlin/berlin-bg2.c +++ b/drivers/pinctrl/berlin/berlin-bg2.c | |||
@@ -218,11 +218,11 @@ static const struct berlin_pinctrl_desc berlin2_sysmgr_pinctrl_data = { | |||
218 | 218 | ||
219 | static const struct of_device_id berlin2_pinctrl_match[] = { | 219 | static const struct of_device_id berlin2_pinctrl_match[] = { |
220 | { | 220 | { |
221 | .compatible = "marvell,berlin2-chip-ctrl", | 221 | .compatible = "marvell,berlin2-soc-pinctrl", |
222 | .data = &berlin2_soc_pinctrl_data | 222 | .data = &berlin2_soc_pinctrl_data |
223 | }, | 223 | }, |
224 | { | 224 | { |
225 | .compatible = "marvell,berlin2-system-ctrl", | 225 | .compatible = "marvell,berlin2-system-pinctrl", |
226 | .data = &berlin2_sysmgr_pinctrl_data | 226 | .data = &berlin2_sysmgr_pinctrl_data |
227 | }, | 227 | }, |
228 | {} | 228 | {} |
@@ -233,28 +233,6 @@ static int berlin2_pinctrl_probe(struct platform_device *pdev) | |||
233 | { | 233 | { |
234 | const struct of_device_id *match = | 234 | const struct of_device_id *match = |
235 | of_match_device(berlin2_pinctrl_match, &pdev->dev); | 235 | of_match_device(berlin2_pinctrl_match, &pdev->dev); |
236 | struct regmap_config *rmconfig; | ||
237 | struct regmap *regmap; | ||
238 | struct resource *res; | ||
239 | void __iomem *base; | ||
240 | |||
241 | rmconfig = devm_kzalloc(&pdev->dev, sizeof(*rmconfig), GFP_KERNEL); | ||
242 | if (!rmconfig) | ||
243 | return -ENOMEM; | ||
244 | |||
245 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
246 | base = devm_ioremap_resource(&pdev->dev, res); | ||
247 | if (IS_ERR(base)) | ||
248 | return PTR_ERR(base); | ||
249 | |||
250 | rmconfig->reg_bits = 32, | ||
251 | rmconfig->val_bits = 32, | ||
252 | rmconfig->reg_stride = 4, | ||
253 | rmconfig->max_register = resource_size(res); | ||
254 | |||
255 | regmap = devm_regmap_init_mmio(&pdev->dev, base, rmconfig); | ||
256 | if (IS_ERR(regmap)) | ||
257 | return PTR_ERR(regmap); | ||
258 | 236 | ||
259 | return berlin_pinctrl_probe(pdev, match->data); | 237 | return berlin_pinctrl_probe(pdev, match->data); |
260 | } | 238 | } |
diff --git a/drivers/pinctrl/berlin/berlin-bg2cd.c b/drivers/pinctrl/berlin/berlin-bg2cd.c index 19ac5a22c947..9e11f191d643 100644 --- a/drivers/pinctrl/berlin/berlin-bg2cd.c +++ b/drivers/pinctrl/berlin/berlin-bg2cd.c | |||
@@ -161,11 +161,11 @@ static const struct berlin_pinctrl_desc berlin2cd_sysmgr_pinctrl_data = { | |||
161 | 161 | ||
162 | static const struct of_device_id berlin2cd_pinctrl_match[] = { | 162 | static const struct of_device_id berlin2cd_pinctrl_match[] = { |
163 | { | 163 | { |
164 | .compatible = "marvell,berlin2cd-chip-ctrl", | 164 | .compatible = "marvell,berlin2cd-soc-pinctrl", |
165 | .data = &berlin2cd_soc_pinctrl_data | 165 | .data = &berlin2cd_soc_pinctrl_data |
166 | }, | 166 | }, |
167 | { | 167 | { |
168 | .compatible = "marvell,berlin2cd-system-ctrl", | 168 | .compatible = "marvell,berlin2cd-system-pinctrl", |
169 | .data = &berlin2cd_sysmgr_pinctrl_data | 169 | .data = &berlin2cd_sysmgr_pinctrl_data |
170 | }, | 170 | }, |
171 | {} | 171 | {} |
@@ -176,28 +176,6 @@ static int berlin2cd_pinctrl_probe(struct platform_device *pdev) | |||
176 | { | 176 | { |
177 | const struct of_device_id *match = | 177 | const struct of_device_id *match = |
178 | of_match_device(berlin2cd_pinctrl_match, &pdev->dev); | 178 | of_match_device(berlin2cd_pinctrl_match, &pdev->dev); |
179 | struct regmap_config *rmconfig; | ||
180 | struct regmap *regmap; | ||
181 | struct resource *res; | ||
182 | void __iomem *base; | ||
183 | |||
184 | rmconfig = devm_kzalloc(&pdev->dev, sizeof(*rmconfig), GFP_KERNEL); | ||
185 | if (!rmconfig) | ||
186 | return -ENOMEM; | ||
187 | |||
188 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
189 | base = devm_ioremap_resource(&pdev->dev, res); | ||
190 | if (IS_ERR(base)) | ||
191 | return PTR_ERR(base); | ||
192 | |||
193 | rmconfig->reg_bits = 32, | ||
194 | rmconfig->val_bits = 32, | ||
195 | rmconfig->reg_stride = 4, | ||
196 | rmconfig->max_register = resource_size(res); | ||
197 | |||
198 | regmap = devm_regmap_init_mmio(&pdev->dev, base, rmconfig); | ||
199 | if (IS_ERR(regmap)) | ||
200 | return PTR_ERR(regmap); | ||
201 | 179 | ||
202 | return berlin_pinctrl_probe(pdev, match->data); | 180 | return berlin_pinctrl_probe(pdev, match->data); |
203 | } | 181 | } |
diff --git a/drivers/pinctrl/berlin/berlin-bg2q.c b/drivers/pinctrl/berlin/berlin-bg2q.c index bd9662e57ad3..ba7a8a8ad010 100644 --- a/drivers/pinctrl/berlin/berlin-bg2q.c +++ b/drivers/pinctrl/berlin/berlin-bg2q.c | |||
@@ -380,11 +380,11 @@ static const struct berlin_pinctrl_desc berlin2q_sysmgr_pinctrl_data = { | |||
380 | 380 | ||
381 | static const struct of_device_id berlin2q_pinctrl_match[] = { | 381 | static const struct of_device_id berlin2q_pinctrl_match[] = { |
382 | { | 382 | { |
383 | .compatible = "marvell,berlin2q-chip-ctrl", | 383 | .compatible = "marvell,berlin2q-soc-pinctrl", |
384 | .data = &berlin2q_soc_pinctrl_data, | 384 | .data = &berlin2q_soc_pinctrl_data, |
385 | }, | 385 | }, |
386 | { | 386 | { |
387 | .compatible = "marvell,berlin2q-system-ctrl", | 387 | .compatible = "marvell,berlin2q-system-pinctrl", |
388 | .data = &berlin2q_sysmgr_pinctrl_data, | 388 | .data = &berlin2q_sysmgr_pinctrl_data, |
389 | }, | 389 | }, |
390 | {} | 390 | {} |
@@ -395,28 +395,6 @@ static int berlin2q_pinctrl_probe(struct platform_device *pdev) | |||
395 | { | 395 | { |
396 | const struct of_device_id *match = | 396 | const struct of_device_id *match = |
397 | of_match_device(berlin2q_pinctrl_match, &pdev->dev); | 397 | of_match_device(berlin2q_pinctrl_match, &pdev->dev); |
398 | struct regmap_config *rmconfig; | ||
399 | struct regmap *regmap; | ||
400 | struct resource *res; | ||
401 | void __iomem *base; | ||
402 | |||
403 | rmconfig = devm_kzalloc(&pdev->dev, sizeof(*rmconfig), GFP_KERNEL); | ||
404 | if (!rmconfig) | ||
405 | return -ENOMEM; | ||
406 | |||
407 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
408 | base = devm_ioremap_resource(&pdev->dev, res); | ||
409 | if (IS_ERR(base)) | ||
410 | return PTR_ERR(base); | ||
411 | |||
412 | rmconfig->reg_bits = 32, | ||
413 | rmconfig->val_bits = 32, | ||
414 | rmconfig->reg_stride = 4, | ||
415 | rmconfig->max_register = resource_size(res); | ||
416 | |||
417 | regmap = devm_regmap_init_mmio(&pdev->dev, base, rmconfig); | ||
418 | if (IS_ERR(regmap)) | ||
419 | return PTR_ERR(regmap); | ||
420 | 398 | ||
421 | return berlin_pinctrl_probe(pdev, match->data); | 399 | return berlin_pinctrl_probe(pdev, match->data); |
422 | } | 400 | } |
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c index 7f0b0f93242b..65b0e211b89e 100644 --- a/drivers/pinctrl/berlin/berlin.c +++ b/drivers/pinctrl/berlin/berlin.c | |||
@@ -11,6 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/io.h> | 13 | #include <linux/io.h> |
14 | #include <linux/mfd/syscon.h> | ||
14 | #include <linux/module.h> | 15 | #include <linux/module.h> |
15 | #include <linux/of.h> | 16 | #include <linux/of.h> |
16 | #include <linux/of_address.h> | 17 | #include <linux/of_address.h> |
@@ -295,13 +296,15 @@ int berlin_pinctrl_probe(struct platform_device *pdev, | |||
295 | const struct berlin_pinctrl_desc *desc) | 296 | const struct berlin_pinctrl_desc *desc) |
296 | { | 297 | { |
297 | struct device *dev = &pdev->dev; | 298 | struct device *dev = &pdev->dev; |
299 | struct device_node *parent_np = of_get_parent(dev->of_node); | ||
298 | struct berlin_pinctrl *pctrl; | 300 | struct berlin_pinctrl *pctrl; |
299 | struct regmap *regmap; | 301 | struct regmap *regmap; |
300 | int ret; | 302 | int ret; |
301 | 303 | ||
302 | regmap = dev_get_regmap(&pdev->dev, NULL); | 304 | regmap = syscon_node_to_regmap(parent_np); |
303 | if (!regmap) | 305 | of_node_put(parent_np); |
304 | return -ENODEV; | 306 | if (IS_ERR(regmap)) |
307 | return PTR_ERR(regmap); | ||
305 | 308 | ||
306 | pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL); | 309 | pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL); |
307 | if (!pctrl) | 310 | if (!pctrl) |
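Editor's note: with this change the pinctrl node is expected to sit under the chip-ctrl/system-ctrl syscon node and to borrow that parent's regmap instead of building its own MMIO regmap. A minimal sketch of the lookup pattern, with a hypothetical helper name:

#include <linux/device.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

/* Fetch the shared regmap of a "syscon"/"simple-mfd" parent node. */
static struct regmap *example_parent_syscon_regmap(struct device *dev)
{
	struct device_node *parent_np = of_get_parent(dev->of_node);
	struct regmap *regmap = syscon_node_to_regmap(parent_np);

	of_node_put(parent_np);
	return regmap;			/* ERR_PTR() on failure */
}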
diff --git a/drivers/reset/reset-berlin.c b/drivers/reset/reset-berlin.c index f8b48a13cf0b..3c922d37255c 100644 --- a/drivers/reset/reset-berlin.c +++ b/drivers/reset/reset-berlin.c | |||
@@ -11,10 +11,12 @@ | |||
11 | 11 | ||
12 | #include <linux/delay.h> | 12 | #include <linux/delay.h> |
13 | #include <linux/io.h> | 13 | #include <linux/io.h> |
14 | #include <linux/mfd/syscon.h> | ||
14 | #include <linux/module.h> | 15 | #include <linux/module.h> |
15 | #include <linux/of.h> | 16 | #include <linux/of.h> |
16 | #include <linux/of_address.h> | 17 | #include <linux/of_address.h> |
17 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | #include <linux/regmap.h> | ||
18 | #include <linux/reset-controller.h> | 20 | #include <linux/reset-controller.h> |
19 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
20 | #include <linux/types.h> | 22 | #include <linux/types.h> |
@@ -25,8 +27,7 @@ | |||
25 | container_of((p), struct berlin_reset_priv, rcdev) | 27 | container_of((p), struct berlin_reset_priv, rcdev) |
26 | 28 | ||
27 | struct berlin_reset_priv { | 29 | struct berlin_reset_priv { |
28 | void __iomem *base; | 30 | struct regmap *regmap; |
29 | unsigned int size; | ||
30 | struct reset_controller_dev rcdev; | 31 | struct reset_controller_dev rcdev; |
31 | }; | 32 | }; |
32 | 33 | ||
@@ -37,7 +38,7 @@ static int berlin_reset_reset(struct reset_controller_dev *rcdev, | |||
37 | int offset = id >> 8; | 38 | int offset = id >> 8; |
38 | int mask = BIT(id & 0x1f); | 39 | int mask = BIT(id & 0x1f); |
39 | 40 | ||
40 | writel(mask, priv->base + offset); | 41 | regmap_write(priv->regmap, offset, mask); |
41 | 42 | ||
42 | /* let the reset be effective */ | 43 | /* let the reset be effective */ |
43 | udelay(10); | 44 | udelay(10); |
@@ -52,7 +53,6 @@ static struct reset_control_ops berlin_reset_ops = { | |||
52 | static int berlin_reset_xlate(struct reset_controller_dev *rcdev, | 53 | static int berlin_reset_xlate(struct reset_controller_dev *rcdev, |
53 | const struct of_phandle_args *reset_spec) | 54 | const struct of_phandle_args *reset_spec) |
54 | { | 55 | { |
55 | struct berlin_reset_priv *priv = to_berlin_reset_priv(rcdev); | ||
56 | unsigned offset, bit; | 56 | unsigned offset, bit; |
57 | 57 | ||
58 | if (WARN_ON(reset_spec->args_count != rcdev->of_reset_n_cells)) | 58 | if (WARN_ON(reset_spec->args_count != rcdev->of_reset_n_cells)) |
@@ -61,71 +61,53 @@ static int berlin_reset_xlate(struct reset_controller_dev *rcdev, | |||
61 | offset = reset_spec->args[0]; | 61 | offset = reset_spec->args[0]; |
62 | bit = reset_spec->args[1]; | 62 | bit = reset_spec->args[1]; |
63 | 63 | ||
64 | if (offset >= priv->size) | ||
65 | return -EINVAL; | ||
66 | |||
67 | if (bit >= BERLIN_MAX_RESETS) | 64 | if (bit >= BERLIN_MAX_RESETS) |
68 | return -EINVAL; | 65 | return -EINVAL; |
69 | 66 | ||
70 | return (offset << 8) | bit; | 67 | return (offset << 8) | bit; |
71 | } | 68 | } |
72 | 69 | ||
73 | static int __berlin_reset_init(struct device_node *np) | 70 | static int berlin2_reset_probe(struct platform_device *pdev) |
74 | { | 71 | { |
72 | struct device_node *parent_np = of_get_parent(pdev->dev.of_node); | ||
75 | struct berlin_reset_priv *priv; | 73 | struct berlin_reset_priv *priv; |
76 | struct resource res; | ||
77 | resource_size_t size; | ||
78 | int ret; | ||
79 | 74 | ||
80 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 75 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); |
81 | if (!priv) | 76 | if (!priv) |
82 | return -ENOMEM; | 77 | return -ENOMEM; |
83 | 78 | ||
84 | ret = of_address_to_resource(np, 0, &res); | 79 | priv->regmap = syscon_node_to_regmap(parent_np); |
85 | if (ret) | 80 | of_node_put(parent_np); |
86 | goto err; | 81 | if (IS_ERR(priv->regmap)) |
87 | 82 | return PTR_ERR(priv->regmap); | |
88 | size = resource_size(&res); | ||
89 | priv->base = ioremap(res.start, size); | ||
90 | if (!priv->base) { | ||
91 | ret = -ENOMEM; | ||
92 | goto err; | ||
93 | } | ||
94 | priv->size = size; | ||
95 | 83 | ||
96 | priv->rcdev.owner = THIS_MODULE; | 84 | priv->rcdev.owner = THIS_MODULE; |
97 | priv->rcdev.ops = &berlin_reset_ops; | 85 | priv->rcdev.ops = &berlin_reset_ops; |
98 | priv->rcdev.of_node = np; | 86 | priv->rcdev.of_node = pdev->dev.of_node; |
99 | priv->rcdev.of_reset_n_cells = 2; | 87 | priv->rcdev.of_reset_n_cells = 2; |
100 | priv->rcdev.of_xlate = berlin_reset_xlate; | 88 | priv->rcdev.of_xlate = berlin_reset_xlate; |
101 | 89 | ||
102 | reset_controller_register(&priv->rcdev); | 90 | reset_controller_register(&priv->rcdev); |
103 | 91 | ||
104 | return 0; | 92 | return 0; |
105 | |||
106 | err: | ||
107 | kfree(priv); | ||
108 | return ret; | ||
109 | } | 93 | } |
110 | 94 | ||
111 | static const struct of_device_id berlin_reset_of_match[] __initconst = { | 95 | static const struct of_device_id berlin_reset_dt_match[] = { |
112 | { .compatible = "marvell,berlin2-chip-ctrl" }, | 96 | { .compatible = "marvell,berlin2-reset" }, |
113 | { .compatible = "marvell,berlin2cd-chip-ctrl" }, | ||
114 | { .compatible = "marvell,berlin2q-chip-ctrl" }, | ||
115 | { }, | 97 | { }, |
116 | }; | 98 | }; |
99 | MODULE_DEVICE_TABLE(of, berlin_reset_dt_match); | ||
100 | |||
101 | static struct platform_driver berlin_reset_driver = { | ||
102 | .probe = berlin2_reset_probe, | ||
103 | .driver = { | ||
104 | .name = "berlin2-reset", | ||
105 | .of_match_table = berlin_reset_dt_match, | ||
106 | }, | ||
107 | }; | ||
108 | module_platform_driver(berlin_reset_driver); | ||
117 | 109 | ||
118 | static int __init berlin_reset_init(void) | 110 | MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>"); |
119 | { | 111 | MODULE_AUTHOR("Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>"); |
120 | struct device_node *np; | 112 | MODULE_DESCRIPTION("Marvell Berlin reset driver"); |
121 | int ret; | 113 | MODULE_LICENSE("GPL"); |
122 | |||
123 | for_each_matching_node(np, berlin_reset_of_match) { | ||
124 | ret = __berlin_reset_init(np); | ||
125 | if (ret) | ||
126 | return ret; | ||
127 | } | ||
128 | |||
129 | return 0; | ||
130 | } | ||
131 | arch_initcall(berlin_reset_init); | ||
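Editor's note: the two-cell reset specifier is folded into a single reset id by berlin_reset_xlate() and split again in berlin_reset_reset(). A worked sketch of that encoding, with values chosen purely for illustration:

#include <linux/bitops.h>

/* Pack the <offset bit> DT specifier into one id, as the xlate does. */
static unsigned int berlin_example_pack(unsigned int offset, unsigned int bit)
{
	return (offset << 8) | bit;	/* e.g. offset 0x104, bit 14 -> 0x1040e */
}

/* Recover the register offset and bit mask, as the reset op does. */
static void berlin_example_unpack(unsigned int id,
				  unsigned int *offset, unsigned int *mask)
{
	*offset = id >> 8;		/* offset within the chip-ctrl regmap */
	*mask = BIT(id & 0x1f);		/* bit to write at that offset */
}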
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig index d8bde82f0370..96ddecb92254 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig | |||
@@ -2,6 +2,7 @@ menu "SOC (System On Chip) specific Drivers" | |||
2 | 2 | ||
3 | source "drivers/soc/mediatek/Kconfig" | 3 | source "drivers/soc/mediatek/Kconfig" |
4 | source "drivers/soc/qcom/Kconfig" | 4 | source "drivers/soc/qcom/Kconfig" |
5 | source "drivers/soc/sunxi/Kconfig" | ||
5 | source "drivers/soc/ti/Kconfig" | 6 | source "drivers/soc/ti/Kconfig" |
6 | source "drivers/soc/versatile/Kconfig" | 7 | source "drivers/soc/versatile/Kconfig" |
7 | 8 | ||
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 70042b259744..7dc7c0d8a2c1 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ | 5 | obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ |
6 | obj-$(CONFIG_ARCH_QCOM) += qcom/ | 6 | obj-$(CONFIG_ARCH_QCOM) += qcom/ |
7 | obj-$(CONFIG_ARCH_SUNXI) += sunxi/ | ||
7 | obj-$(CONFIG_ARCH_TEGRA) += tegra/ | 8 | obj-$(CONFIG_ARCH_TEGRA) += tegra/ |
8 | obj-$(CONFIG_SOC_TI) += ti/ | 9 | obj-$(CONFIG_SOC_TI) += ti/ |
9 | obj-$(CONFIG_PLAT_VERSATILE) += versatile/ | 10 | obj-$(CONFIG_PLAT_VERSATILE) += versatile/ |
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 460b2dba109c..5eea374c8fa6 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig | |||
@@ -10,3 +10,10 @@ config QCOM_GSBI | |||
10 | functions for connecting the underlying serial UART, SPI, and I2C | 10 | functions for connecting the underlying serial UART, SPI, and I2C |
11 | devices to the output pins. | 11 | devices to the output pins. |
12 | 12 | ||
13 | config QCOM_PM | ||
14 | bool "Qualcomm Power Management" | ||
15 | depends on ARCH_QCOM && !ARM64 | ||
16 | help | ||
17 | QCOM Platform specific power driver to manage cores and L2 low power | ||
18 | modes. It interfaces with various system drivers to put the cores in | ||
19 | low power modes. | ||
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 438901257ac1..931d385386c5 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile | |||
@@ -1 +1,2 @@ | |||
1 | obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o | 1 | obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o |
2 | obj-$(CONFIG_QCOM_PM) += spm.o | ||
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c new file mode 100644 index 000000000000..b562af816c0a --- /dev/null +++ b/drivers/soc/qcom/spm.c | |||
@@ -0,0 +1,385 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. | ||
3 | * Copyright (c) 2014,2015, Linaro Ltd. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 and | ||
7 | * only version 2 as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/io.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <linux/of_address.h> | ||
22 | #include <linux/of_device.h> | ||
23 | #include <linux/err.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/cpuidle.h> | ||
26 | #include <linux/cpu_pm.h> | ||
27 | #include <linux/qcom_scm.h> | ||
28 | |||
29 | #include <asm/cpuidle.h> | ||
30 | #include <asm/proc-fns.h> | ||
31 | #include <asm/suspend.h> | ||
32 | |||
33 | #define MAX_PMIC_DATA 2 | ||
34 | #define MAX_SEQ_DATA 64 | ||
35 | #define SPM_CTL_INDEX 0x7f | ||
36 | #define SPM_CTL_INDEX_SHIFT 4 | ||
37 | #define SPM_CTL_EN BIT(0) | ||
38 | |||
39 | enum pm_sleep_mode { | ||
40 | PM_SLEEP_MODE_STBY, | ||
41 | PM_SLEEP_MODE_RET, | ||
42 | PM_SLEEP_MODE_SPC, | ||
43 | PM_SLEEP_MODE_PC, | ||
44 | PM_SLEEP_MODE_NR, | ||
45 | }; | ||
46 | |||
47 | enum spm_reg { | ||
48 | SPM_REG_CFG, | ||
49 | SPM_REG_SPM_CTL, | ||
50 | SPM_REG_DLY, | ||
51 | SPM_REG_PMIC_DLY, | ||
52 | SPM_REG_PMIC_DATA_0, | ||
53 | SPM_REG_PMIC_DATA_1, | ||
54 | SPM_REG_VCTL, | ||
55 | SPM_REG_SEQ_ENTRY, | ||
56 | SPM_REG_SPM_STS, | ||
57 | SPM_REG_PMIC_STS, | ||
58 | SPM_REG_NR, | ||
59 | }; | ||
60 | |||
61 | struct spm_reg_data { | ||
62 | const u8 *reg_offset; | ||
63 | u32 spm_cfg; | ||
64 | u32 spm_dly; | ||
65 | u32 pmic_dly; | ||
66 | u32 pmic_data[MAX_PMIC_DATA]; | ||
67 | u8 seq[MAX_SEQ_DATA]; | ||
68 | u8 start_index[PM_SLEEP_MODE_NR]; | ||
69 | }; | ||
70 | |||
71 | struct spm_driver_data { | ||
72 | void __iomem *reg_base; | ||
73 | const struct spm_reg_data *reg_data; | ||
74 | }; | ||
75 | |||
76 | static const u8 spm_reg_offset_v2_1[SPM_REG_NR] = { | ||
77 | [SPM_REG_CFG] = 0x08, | ||
78 | [SPM_REG_SPM_CTL] = 0x30, | ||
79 | [SPM_REG_DLY] = 0x34, | ||
80 | [SPM_REG_SEQ_ENTRY] = 0x80, | ||
81 | }; | ||
82 | |||
83 | /* SPM register data for 8974, 8084 */ | ||
84 | static const struct spm_reg_data spm_reg_8974_8084_cpu = { | ||
85 | .reg_offset = spm_reg_offset_v2_1, | ||
86 | .spm_cfg = 0x1, | ||
87 | .spm_dly = 0x3C102800, | ||
88 | .seq = { 0x03, 0x0B, 0x0F, 0x00, 0x20, 0x80, 0x10, 0xE8, 0x5B, 0x03, | ||
89 | 0x3B, 0xE8, 0x5B, 0x82, 0x10, 0x0B, 0x30, 0x06, 0x26, 0x30, | ||
90 | 0x0F }, | ||
91 | .start_index[PM_SLEEP_MODE_STBY] = 0, | ||
92 | .start_index[PM_SLEEP_MODE_SPC] = 3, | ||
93 | }; | ||
94 | |||
95 | static const u8 spm_reg_offset_v1_1[SPM_REG_NR] = { | ||
96 | [SPM_REG_CFG] = 0x08, | ||
97 | [SPM_REG_SPM_CTL] = 0x20, | ||
98 | [SPM_REG_PMIC_DLY] = 0x24, | ||
99 | [SPM_REG_PMIC_DATA_0] = 0x28, | ||
100 | [SPM_REG_PMIC_DATA_1] = 0x2C, | ||
101 | [SPM_REG_SEQ_ENTRY] = 0x80, | ||
102 | }; | ||
103 | |||
104 | /* SPM register data for 8064 */ | ||
105 | static const struct spm_reg_data spm_reg_8064_cpu = { | ||
106 | .reg_offset = spm_reg_offset_v1_1, | ||
107 | .spm_cfg = 0x1F, | ||
108 | .pmic_dly = 0x02020004, | ||
109 | .pmic_data[0] = 0x0084009C, | ||
110 | .pmic_data[1] = 0x00A4001C, | ||
111 | .seq = { 0x03, 0x0F, 0x00, 0x24, 0x54, 0x10, 0x09, 0x03, 0x01, | ||
112 | 0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F }, | ||
113 | .start_index[PM_SLEEP_MODE_STBY] = 0, | ||
114 | .start_index[PM_SLEEP_MODE_SPC] = 2, | ||
115 | }; | ||
116 | |||
117 | static DEFINE_PER_CPU(struct spm_driver_data *, cpu_spm_drv); | ||
118 | |||
119 | typedef int (*idle_fn)(int); | ||
120 | static DEFINE_PER_CPU(idle_fn*, qcom_idle_ops); | ||
121 | |||
122 | static inline void spm_register_write(struct spm_driver_data *drv, | ||
123 | enum spm_reg reg, u32 val) | ||
124 | { | ||
125 | if (drv->reg_data->reg_offset[reg]) | ||
126 | writel_relaxed(val, drv->reg_base + | ||
127 | drv->reg_data->reg_offset[reg]); | ||
128 | } | ||
129 | |||
130 | /* Ensure the write has taken effect before returning */ | ||
131 | static inline void spm_register_write_sync(struct spm_driver_data *drv, | ||
132 | enum spm_reg reg, u32 val) | ||
133 | { | ||
134 | u32 ret; | ||
135 | |||
136 | if (!drv->reg_data->reg_offset[reg]) | ||
137 | return; | ||
138 | |||
139 | do { | ||
140 | writel_relaxed(val, drv->reg_base + | ||
141 | drv->reg_data->reg_offset[reg]); | ||
142 | ret = readl_relaxed(drv->reg_base + | ||
143 | drv->reg_data->reg_offset[reg]); | ||
144 | if (ret == val) | ||
145 | break; | ||
146 | cpu_relax(); | ||
147 | } while (1); | ||
148 | } | ||
149 | |||
150 | static inline u32 spm_register_read(struct spm_driver_data *drv, | ||
151 | enum spm_reg reg) | ||
152 | { | ||
153 | return readl_relaxed(drv->reg_base + drv->reg_data->reg_offset[reg]); | ||
154 | } | ||
155 | |||
156 | static void spm_set_low_power_mode(struct spm_driver_data *drv, | ||
157 | enum pm_sleep_mode mode) | ||
158 | { | ||
159 | u32 start_index; | ||
160 | u32 ctl_val; | ||
161 | |||
162 | start_index = drv->reg_data->start_index[mode]; | ||
163 | |||
164 | ctl_val = spm_register_read(drv, SPM_REG_SPM_CTL); | ||
165 | ctl_val &= ~(SPM_CTL_INDEX << SPM_CTL_INDEX_SHIFT); | ||
166 | ctl_val |= start_index << SPM_CTL_INDEX_SHIFT; | ||
167 | ctl_val |= SPM_CTL_EN; | ||
168 | spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val); | ||
169 | } | ||
170 | |||
171 | static int qcom_pm_collapse(unsigned long int unused) | ||
172 | { | ||
173 | qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON); | ||
174 | |||
175 | /* | ||
176 | * Returns here only if there was a pending interrupt and we did not | ||
177 | * power down as a result. | ||
178 | */ | ||
179 | return -1; | ||
180 | } | ||
181 | |||
182 | static int qcom_cpu_spc(int cpu) | ||
183 | { | ||
184 | int ret; | ||
185 | struct spm_driver_data *drv = per_cpu(cpu_spm_drv, cpu); | ||
186 | |||
187 | spm_set_low_power_mode(drv, PM_SLEEP_MODE_SPC); | ||
188 | ret = cpu_suspend(0, qcom_pm_collapse); | ||
189 | /* | ||
190 | * ARM common code executes WFI without calling into our driver, and | ||
191 | * if the SPM mode is not reset, then we may accidentally power down the | ||
192 | * cpu when we intended only to gate the cpu clock. | ||
193 | * Ensure the state is set to standby before returning. | ||
194 | */ | ||
195 | spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY); | ||
196 | |||
197 | return ret; | ||
198 | } | ||
199 | |||
200 | static int qcom_idle_enter(int cpu, unsigned long index) | ||
201 | { | ||
202 | return per_cpu(qcom_idle_ops, cpu)[index](cpu); | ||
203 | } | ||
204 | |||
205 | static const struct of_device_id qcom_idle_state_match[] __initconst = { | ||
206 | { .compatible = "qcom,idle-state-spc", .data = qcom_cpu_spc }, | ||
207 | { }, | ||
208 | }; | ||
209 | |||
210 | static int __init qcom_cpuidle_init(struct device_node *cpu_node, int cpu) | ||
211 | { | ||
212 | const struct of_device_id *match_id; | ||
213 | struct device_node *state_node; | ||
214 | int i; | ||
215 | int state_count = 1; | ||
216 | idle_fn idle_fns[CPUIDLE_STATE_MAX]; | ||
217 | idle_fn *fns; | ||
218 | cpumask_t mask; | ||
219 | bool use_scm_power_down = false; | ||
220 | |||
221 | for (i = 0; ; i++) { | ||
222 | state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); | ||
223 | if (!state_node) | ||
224 | break; | ||
225 | |||
226 | if (!of_device_is_available(state_node)) | ||
227 | continue; | ||
228 | |||
229 | if (i == CPUIDLE_STATE_MAX) { | ||
230 | pr_warn("%s: cpuidle states reached max possible\n", | ||
231 | __func__); | ||
232 | break; | ||
233 | } | ||
234 | |||
235 | match_id = of_match_node(qcom_idle_state_match, state_node); | ||
236 | if (!match_id) | ||
237 | return -ENODEV; | ||
238 | |||
239 | idle_fns[state_count] = match_id->data; | ||
240 | |||
241 | /* Check if any of the states allow power down */ | ||
242 | if (match_id->data == qcom_cpu_spc) | ||
243 | use_scm_power_down = true; | ||
244 | |||
245 | state_count++; | ||
246 | } | ||
247 | |||
248 | if (state_count == 1) | ||
249 | goto check_spm; | ||
250 | |||
251 | fns = devm_kcalloc(get_cpu_device(cpu), state_count, sizeof(*fns), | ||
252 | GFP_KERNEL); | ||
253 | if (!fns) | ||
254 | return -ENOMEM; | ||
255 | |||
256 | for (i = 1; i < state_count; i++) | ||
257 | fns[i] = idle_fns[i]; | ||
258 | |||
259 | if (use_scm_power_down) { | ||
260 | /* We have at least one power-down mode */ | ||
261 | cpumask_clear(&mask); | ||
262 | cpumask_set_cpu(cpu, &mask); | ||
263 | qcom_scm_set_warm_boot_addr(cpu_resume, &mask); | ||
264 | } | ||
265 | |||
266 | per_cpu(qcom_idle_ops, cpu) = fns; | ||
267 | |||
268 | /* | ||
269 | * SPM probe for the cpu should have happened by now; if the | ||
270 | * SPM device does not exist, return -ENXIO to indicate that the | ||
271 | * cpu does not support idle states. | ||
272 | */ | ||
273 | check_spm: | ||
274 | return per_cpu(cpu_spm_drv, cpu) ? 0 : -ENXIO; | ||
275 | } | ||
276 | |||
277 | static struct cpuidle_ops qcom_cpuidle_ops __initdata = { | ||
278 | .suspend = qcom_idle_enter, | ||
279 | .init = qcom_cpuidle_init, | ||
280 | }; | ||
281 | |||
282 | CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v1, "qcom,kpss-acc-v1", &qcom_cpuidle_ops); | ||
283 | CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v2, "qcom,kpss-acc-v2", &qcom_cpuidle_ops); | ||
284 | |||
285 | static struct spm_driver_data *spm_get_drv(struct platform_device *pdev, | ||
286 | int *spm_cpu) | ||
287 | { | ||
288 | struct spm_driver_data *drv = NULL; | ||
289 | struct device_node *cpu_node, *saw_node; | ||
290 | int cpu; | ||
291 | bool found; | ||
292 | |||
293 | for_each_possible_cpu(cpu) { | ||
294 | cpu_node = of_cpu_device_node_get(cpu); | ||
295 | if (!cpu_node) | ||
296 | continue; | ||
297 | saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0); | ||
298 | found = (saw_node == pdev->dev.of_node); | ||
299 | of_node_put(saw_node); | ||
300 | of_node_put(cpu_node); | ||
301 | if (found) | ||
302 | break; | ||
303 | } | ||
304 | |||
305 | if (found) { | ||
306 | drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL); | ||
307 | if (drv) | ||
308 | *spm_cpu = cpu; | ||
309 | } | ||
310 | |||
311 | return drv; | ||
312 | } | ||
313 | |||
314 | static const struct of_device_id spm_match_table[] = { | ||
315 | { .compatible = "qcom,msm8974-saw2-v2.1-cpu", | ||
316 | .data = &spm_reg_8974_8084_cpu }, | ||
317 | { .compatible = "qcom,apq8084-saw2-v2.1-cpu", | ||
318 | .data = &spm_reg_8974_8084_cpu }, | ||
319 | { .compatible = "qcom,apq8064-saw2-v1.1-cpu", | ||
320 | .data = &spm_reg_8064_cpu }, | ||
321 | { }, | ||
322 | }; | ||
323 | |||
324 | static int spm_dev_probe(struct platform_device *pdev) | ||
325 | { | ||
326 | struct spm_driver_data *drv; | ||
327 | struct resource *res; | ||
328 | const struct of_device_id *match_id; | ||
329 | void __iomem *addr; | ||
330 | int cpu; | ||
331 | |||
332 | drv = spm_get_drv(pdev, &cpu); | ||
333 | if (!drv) | ||
334 | return -EINVAL; | ||
335 | |||
336 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
337 | drv->reg_base = devm_ioremap_resource(&pdev->dev, res); | ||
338 | if (IS_ERR(drv->reg_base)) | ||
339 | return PTR_ERR(drv->reg_base); | ||
340 | |||
341 | match_id = of_match_node(spm_match_table, pdev->dev.of_node); | ||
342 | if (!match_id) | ||
343 | return -ENODEV; | ||
344 | |||
345 | drv->reg_data = match_id->data; | ||
346 | |||
347 | /* Write the SPM sequences first.. */ | ||
348 | addr = drv->reg_base + drv->reg_data->reg_offset[SPM_REG_SEQ_ENTRY]; | ||
349 | __iowrite32_copy(addr, drv->reg_data->seq, | ||
350 | ARRAY_SIZE(drv->reg_data->seq) / 4); | ||
351 | |||
352 | /* | ||
353 | * ..and then the control registers. | ||
354 | * On some SoCs, if the control registers are written first and the | ||
355 | * CPU is held in reset, the reset signal could trigger the SPM state | ||
356 | * machine before the sequences are completely written. | ||
357 | */ | ||
358 | spm_register_write(drv, SPM_REG_CFG, drv->reg_data->spm_cfg); | ||
359 | spm_register_write(drv, SPM_REG_DLY, drv->reg_data->spm_dly); | ||
360 | spm_register_write(drv, SPM_REG_PMIC_DLY, drv->reg_data->pmic_dly); | ||
361 | spm_register_write(drv, SPM_REG_PMIC_DATA_0, | ||
362 | drv->reg_data->pmic_data[0]); | ||
363 | spm_register_write(drv, SPM_REG_PMIC_DATA_1, | ||
364 | drv->reg_data->pmic_data[1]); | ||
365 | |||
366 | /* Set up Standby as the default low power mode */ | ||
367 | spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY); | ||
368 | |||
369 | per_cpu(cpu_spm_drv, cpu) = drv; | ||
370 | |||
371 | return 0; | ||
372 | } | ||
373 | |||
374 | static struct platform_driver spm_driver = { | ||
375 | .probe = spm_dev_probe, | ||
376 | .driver = { | ||
377 | .name = "saw", | ||
378 | .of_match_table = spm_match_table, | ||
379 | }, | ||
380 | }; | ||
381 | module_platform_driver(spm_driver); | ||
382 | |||
383 | MODULE_LICENSE("GPL v2"); | ||
384 | MODULE_DESCRIPTION("SAW power controller driver"); | ||
385 | MODULE_ALIAS("platform:saw"); | ||
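Editor's note: spm_set_low_power_mode() selects a sleep sequence by writing its start index into SPM_CTL and setting the enable bit. A worked sketch of the register packing, reusing the SPM_CTL_* constants defined above:

/* Compute the SPM_CTL value for a given sequence start index. */
static u32 example_spm_ctl(u32 old_ctl, u32 start_index)
{
	u32 val = old_ctl;

	val &= ~(SPM_CTL_INDEX << SPM_CTL_INDEX_SHIFT);	/* clear bits [10:4] */
	val |= start_index << SPM_CTL_INDEX_SHIFT;	/* SPC on 8974: 3 << 4 */
	val |= SPM_CTL_EN;				/* bit 0 */

	return val;	/* 0x31 for SPC on 8974 when old_ctl was 0 */
}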
diff --git a/drivers/soc/sunxi/Kconfig b/drivers/soc/sunxi/Kconfig new file mode 100644 index 000000000000..353b07e40176 --- /dev/null +++ b/drivers/soc/sunxi/Kconfig | |||
@@ -0,0 +1,10 @@ | |||
1 | # | ||
2 | # Allwinner sunXi SoC drivers | ||
3 | # | ||
4 | config SUNXI_SRAM | ||
5 | bool | ||
6 | default ARCH_SUNXI | ||
7 | help | ||
8 | Say y here to enable the SRAM controller support. This | ||
9 | device is responsible for mapping the SRAM in the sunXi SoCs | ||
10 | either to the CPU/DMA or to the devices. | ||
diff --git a/drivers/soc/sunxi/Makefile b/drivers/soc/sunxi/Makefile new file mode 100644 index 000000000000..4cf9dbdf346e --- /dev/null +++ b/drivers/soc/sunxi/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-$(CONFIG_SUNXI_SRAM) += sunxi_sram.o | |||
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c new file mode 100644 index 000000000000..bc52670c8f4b --- /dev/null +++ b/drivers/soc/sunxi/sunxi_sram.c | |||
@@ -0,0 +1,284 @@ | |||
1 | /* | ||
2 | * Allwinner SoCs SRAM Controller Driver | ||
3 | * | ||
4 | * Copyright (C) 2015 Maxime Ripard | ||
5 | * | ||
6 | * Author: Maxime Ripard <maxime.ripard@free-electrons.com> | ||
7 | * | ||
8 | * This file is licensed under the terms of the GNU General Public | ||
9 | * License version 2. This program is licensed "as is" without any | ||
10 | * warranty of any kind, whether express or implied. | ||
11 | */ | ||
12 | |||
13 | #include <linux/debugfs.h> | ||
14 | #include <linux/io.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/of.h> | ||
17 | #include <linux/of_address.h> | ||
18 | #include <linux/of_device.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | |||
21 | #include <linux/soc/sunxi/sunxi_sram.h> | ||
22 | |||
23 | struct sunxi_sram_func { | ||
24 | char *func; | ||
25 | u8 val; | ||
26 | }; | ||
27 | |||
28 | struct sunxi_sram_data { | ||
29 | char *name; | ||
30 | u8 reg; | ||
31 | u8 offset; | ||
32 | u8 width; | ||
33 | struct sunxi_sram_func *func; | ||
34 | struct list_head list; | ||
35 | }; | ||
36 | |||
37 | struct sunxi_sram_desc { | ||
38 | struct sunxi_sram_data data; | ||
39 | bool claimed; | ||
40 | }; | ||
41 | |||
42 | #define SUNXI_SRAM_MAP(_val, _func) \ | ||
43 | { \ | ||
44 | .func = _func, \ | ||
45 | .val = _val, \ | ||
46 | } | ||
47 | |||
48 | #define SUNXI_SRAM_DATA(_name, _reg, _off, _width, ...) \ | ||
49 | { \ | ||
50 | .name = _name, \ | ||
51 | .reg = _reg, \ | ||
52 | .offset = _off, \ | ||
53 | .width = _width, \ | ||
54 | .func = (struct sunxi_sram_func[]){ \ | ||
55 | __VA_ARGS__, { } }, \ | ||
56 | } | ||
57 | |||
58 | static struct sunxi_sram_desc sun4i_a10_sram_a3_a4 = { | ||
59 | .data = SUNXI_SRAM_DATA("A3-A4", 0x4, 0x4, 2, | ||
60 | SUNXI_SRAM_MAP(0, "cpu"), | ||
61 | SUNXI_SRAM_MAP(1, "emac")), | ||
62 | }; | ||
63 | |||
64 | static struct sunxi_sram_desc sun4i_a10_sram_d = { | ||
65 | .data = SUNXI_SRAM_DATA("D", 0x4, 0x0, 1, | ||
66 | SUNXI_SRAM_MAP(0, "cpu"), | ||
67 | SUNXI_SRAM_MAP(1, "usb-otg")), | ||
68 | }; | ||
69 | |||
70 | static const struct of_device_id sunxi_sram_dt_ids[] = { | ||
71 | { | ||
72 | .compatible = "allwinner,sun4i-a10-sram-a3-a4", | ||
73 | .data = &sun4i_a10_sram_a3_a4.data, | ||
74 | }, | ||
75 | { | ||
76 | .compatible = "allwinner,sun4i-a10-sram-d", | ||
77 | .data = &sun4i_a10_sram_d.data, | ||
78 | }, | ||
79 | {} | ||
80 | }; | ||
81 | |||
82 | static struct device *sram_dev; | ||
83 | static LIST_HEAD(claimed_sram); | ||
84 | static DEFINE_SPINLOCK(sram_lock); | ||
85 | static void __iomem *base; | ||
86 | |||
87 | static int sunxi_sram_show(struct seq_file *s, void *data) | ||
88 | { | ||
89 | struct device_node *sram_node, *section_node; | ||
90 | const struct sunxi_sram_data *sram_data; | ||
91 | const struct of_device_id *match; | ||
92 | struct sunxi_sram_func *func; | ||
93 | const __be32 *sram_addr_p, *section_addr_p; | ||
94 | u32 val; | ||
95 | |||
96 | seq_puts(s, "Allwinner sunXi SRAM\n"); | ||
97 | seq_puts(s, "--------------------\n\n"); | ||
98 | |||
99 | for_each_child_of_node(sram_dev->of_node, sram_node) { | ||
100 | sram_addr_p = of_get_address(sram_node, 0, NULL, NULL); | ||
101 | |||
102 | seq_printf(s, "sram@%08x\n", | ||
103 | be32_to_cpu(*sram_addr_p)); | ||
104 | |||
105 | for_each_child_of_node(sram_node, section_node) { | ||
106 | match = of_match_node(sunxi_sram_dt_ids, section_node); | ||
107 | if (!match) | ||
108 | continue; | ||
109 | sram_data = match->data; | ||
110 | |||
111 | section_addr_p = of_get_address(section_node, 0, | ||
112 | NULL, NULL); | ||
113 | |||
114 | seq_printf(s, "\tsection@%04x\t(%s)\n", | ||
115 | be32_to_cpu(*section_addr_p), | ||
116 | sram_data->name); | ||
117 | |||
118 | val = readl(base + sram_data->reg); | ||
119 | val >>= sram_data->offset; | ||
120 | val &= sram_data->width; | ||
121 | |||
122 | for (func = sram_data->func; func->func; func++) { | ||
123 | seq_printf(s, "\t\t%s%c\n", func->func, | ||
124 | func->val == val ? '*' : ' '); | ||
125 | } | ||
126 | } | ||
127 | |||
128 | seq_puts(s, "\n"); | ||
129 | } | ||
130 | |||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | static int sunxi_sram_open(struct inode *inode, struct file *file) | ||
135 | { | ||
136 | return single_open(file, sunxi_sram_show, inode->i_private); | ||
137 | } | ||
138 | |||
139 | static const struct file_operations sunxi_sram_fops = { | ||
140 | .open = sunxi_sram_open, | ||
141 | .read = seq_read, | ||
142 | .llseek = seq_lseek, | ||
143 | .release = single_release, | ||
144 | }; | ||
145 | |||
146 | static inline struct sunxi_sram_desc *to_sram_desc(const struct sunxi_sram_data *data) | ||
147 | { | ||
148 | return container_of(data, struct sunxi_sram_desc, data); | ||
149 | } | ||
150 | |||
151 | static const struct sunxi_sram_data *sunxi_sram_of_parse(struct device_node *node, | ||
152 | unsigned int *value) | ||
153 | { | ||
154 | const struct of_device_id *match; | ||
155 | struct of_phandle_args args; | ||
156 | int ret; | ||
157 | |||
158 | ret = of_parse_phandle_with_fixed_args(node, "allwinner,sram", 1, 0, | ||
159 | &args); | ||
160 | if (ret) | ||
161 | return ERR_PTR(ret); | ||
162 | |||
163 | if (!of_device_is_available(args.np)) { | ||
164 | ret = -EBUSY; | ||
165 | goto err; | ||
166 | } | ||
167 | |||
168 | if (value) | ||
169 | *value = args.args[0]; | ||
170 | |||
171 | match = of_match_node(sunxi_sram_dt_ids, args.np); | ||
172 | if (!match) { | ||
173 | ret = -EINVAL; | ||
174 | goto err; | ||
175 | } | ||
176 | |||
177 | of_node_put(args.np); | ||
178 | return match->data; | ||
179 | |||
180 | err: | ||
181 | of_node_put(args.np); | ||
182 | return ERR_PTR(ret); | ||
183 | } | ||
184 | |||
185 | int sunxi_sram_claim(struct device *dev) | ||
186 | { | ||
187 | const struct sunxi_sram_data *sram_data; | ||
188 | struct sunxi_sram_desc *sram_desc; | ||
189 | unsigned int device; | ||
190 | u32 val, mask; | ||
191 | |||
192 | if (IS_ERR(base)) | ||
193 | return -EPROBE_DEFER; | ||
194 | |||
195 | if (!dev || !dev->of_node) | ||
196 | return -EINVAL; | ||
197 | |||
198 | sram_data = sunxi_sram_of_parse(dev->of_node, &device); | ||
199 | if (IS_ERR(sram_data)) | ||
200 | return PTR_ERR(sram_data); | ||
201 | |||
202 | sram_desc = to_sram_desc(sram_data); | ||
203 | |||
204 | spin_lock(&sram_lock); | ||
205 | |||
206 | if (sram_desc->claimed) { | ||
207 | spin_unlock(&sram_lock); | ||
208 | return -EBUSY; | ||
209 | } | ||
210 | |||
211 | mask = GENMASK(sram_data->offset + sram_data->width, sram_data->offset); | ||
212 | val = readl(base + sram_data->reg); | ||
213 | val &= ~mask; | ||
214 | writel(val | ((device << sram_data->offset) & mask), | ||
215 | base + sram_data->reg); | ||
216 | |||
217 | spin_unlock(&sram_lock); | ||
218 | |||
219 | return 0; | ||
220 | } | ||
221 | EXPORT_SYMBOL(sunxi_sram_claim); | ||
222 | |||
223 | int sunxi_sram_release(struct device *dev) | ||
224 | { | ||
225 | const struct sunxi_sram_data *sram_data; | ||
226 | struct sunxi_sram_desc *sram_desc; | ||
227 | |||
228 | if (!dev || !dev->of_node) | ||
229 | return -EINVAL; | ||
230 | |||
231 | sram_data = sunxi_sram_of_parse(dev->of_node, NULL); | ||
232 | if (IS_ERR(sram_data)) | ||
233 | return -EINVAL; | ||
234 | |||
235 | sram_desc = to_sram_desc(sram_data); | ||
236 | |||
237 | spin_lock(&sram_lock); | ||
238 | sram_desc->claimed = false; | ||
239 | spin_unlock(&sram_lock); | ||
240 | |||
241 | return 0; | ||
242 | } | ||
243 | EXPORT_SYMBOL(sunxi_sram_release); | ||
244 | |||
245 | static int sunxi_sram_probe(struct platform_device *pdev) | ||
246 | { | ||
247 | struct resource *res; | ||
248 | struct dentry *d; | ||
249 | |||
250 | sram_dev = &pdev->dev; | ||
251 | |||
252 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
253 | base = devm_ioremap_resource(&pdev->dev, res); | ||
254 | if (IS_ERR(base)) | ||
255 | return PTR_ERR(base); | ||
256 | |||
257 | of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); | ||
258 | |||
259 | d = debugfs_create_file("sram", S_IRUGO, NULL, NULL, | ||
260 | &sunxi_sram_fops); | ||
261 | if (!d) | ||
262 | return -ENOMEM; | ||
263 | |||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | static const struct of_device_id sunxi_sram_dt_match[] = { | ||
268 | { .compatible = "allwinner,sun4i-a10-sram-controller" }, | ||
269 | { }, | ||
270 | }; | ||
271 | MODULE_DEVICE_TABLE(of, sunxi_sram_dt_match); | ||
272 | |||
273 | static struct platform_driver sunxi_sram_driver = { | ||
274 | .driver = { | ||
275 | .name = "sunxi-sram", | ||
276 | .of_match_table = sunxi_sram_dt_match, | ||
277 | }, | ||
278 | .probe = sunxi_sram_probe, | ||
279 | }; | ||
280 | module_platform_driver(sunxi_sram_driver); | ||
281 | |||
282 | MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>"); | ||
283 | MODULE_DESCRIPTION("Allwinner sunXi SRAM Controller Driver"); | ||
284 | MODULE_LICENSE("GPL"); | ||
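Editor's note: for a client driver the API reduces to a claim in probe and a release on teardown, with the client's DT node carrying an "allwinner,sram" phandle plus one mux-value argument. A minimal consumer sketch (the driver name is hypothetical):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/soc/sunxi/sunxi_sram.h>

static int example_emac_probe(struct platform_device *pdev)
{
	int ret;

	/* Returns -EPROBE_DEFER until the SRAM controller has probed,
	 * and -EBUSY if the section is already claimed. */
	ret = sunxi_sram_claim(&pdev->dev);
	if (ret)
		return ret;

	/* ... normal device setup ... */
	return 0;
}

static int example_emac_remove(struct platform_device *pdev)
{
	sunxi_sram_release(&pdev->dev);
	return 0;
}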
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c index 3bf5aba4caaa..73fad05d8f2c 100644 --- a/drivers/soc/tegra/fuse/tegra-apbmisc.c +++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c | |||
@@ -28,8 +28,15 @@ | |||
28 | #define APBMISC_SIZE 0x64 | 28 | #define APBMISC_SIZE 0x64 |
29 | #define FUSE_SKU_INFO 0x10 | 29 | #define FUSE_SKU_INFO 0x10 |
30 | 30 | ||
31 | #define PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT 4 | ||
32 | #define PMC_STRAPPING_OPT_A_RAM_CODE_MASK_LONG \ | ||
33 | (0xf << PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT) | ||
34 | #define PMC_STRAPPING_OPT_A_RAM_CODE_MASK_SHORT \ | ||
35 | (0x3 << PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT) | ||
36 | |||
31 | static void __iomem *apbmisc_base; | 37 | static void __iomem *apbmisc_base; |
32 | static void __iomem *strapping_base; | 38 | static void __iomem *strapping_base; |
39 | static bool long_ram_code; | ||
33 | 40 | ||
34 | u32 tegra_read_chipid(void) | 41 | u32 tegra_read_chipid(void) |
35 | { | 42 | { |
@@ -54,6 +61,18 @@ u32 tegra_read_straps(void) | |||
54 | return 0; | 61 | return 0; |
55 | } | 62 | } |
56 | 63 | ||
64 | u32 tegra_read_ram_code(void) | ||
65 | { | ||
66 | u32 straps = tegra_read_straps(); | ||
67 | |||
68 | if (long_ram_code) | ||
69 | straps &= PMC_STRAPPING_OPT_A_RAM_CODE_MASK_LONG; | ||
70 | else | ||
71 | straps &= PMC_STRAPPING_OPT_A_RAM_CODE_MASK_SHORT; | ||
72 | |||
73 | return straps >> PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT; | ||
74 | } | ||
75 | |||
57 | static const struct of_device_id apbmisc_match[] __initconst = { | 76 | static const struct of_device_id apbmisc_match[] __initconst = { |
58 | { .compatible = "nvidia,tegra20-apbmisc", }, | 77 | { .compatible = "nvidia,tegra20-apbmisc", }, |
59 | {}, | 78 | {}, |
@@ -112,4 +131,6 @@ void __init tegra_init_apbmisc(void) | |||
112 | strapping_base = of_iomap(np, 1); | 131 | strapping_base = of_iomap(np, 1); |
113 | if (!strapping_base) | 132 | if (!strapping_base) |
114 | pr_err("ioremap tegra strapping_base failed\n"); | 133 | pr_err("ioremap tegra strapping_base failed\n"); |
134 | |||
135 | long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code"); | ||
115 | } | 136 | } |
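Editor's note: tegra_read_ram_code() only masks and shifts the strapping register; whether four or two bits are used depends on the new "nvidia,long-ram-code" property. A small worked sketch using the masks defined above:

static u32 example_ram_code(u32 straps, bool long_ram_code)
{
	u32 mask = long_ram_code ? PMC_STRAPPING_OPT_A_RAM_CODE_MASK_LONG
				 : PMC_STRAPPING_OPT_A_RAM_CODE_MASK_SHORT;

	/* e.g. straps = 0xd4: long code is 0xd, short code is 0x1 */
	return (straps & mask) >> PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT;
}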
diff --git a/include/linux/mbus.h b/include/linux/mbus.h index 611b69fa8594..1f7bc630d225 100644 --- a/include/linux/mbus.h +++ b/include/linux/mbus.h | |||
@@ -54,11 +54,16 @@ struct mbus_dram_target_info | |||
54 | */ | 54 | */ |
55 | #ifdef CONFIG_PLAT_ORION | 55 | #ifdef CONFIG_PLAT_ORION |
56 | extern const struct mbus_dram_target_info *mv_mbus_dram_info(void); | 56 | extern const struct mbus_dram_target_info *mv_mbus_dram_info(void); |
57 | extern const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void); | ||
57 | #else | 58 | #else |
58 | static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void) | 59 | static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void) |
59 | { | 60 | { |
60 | return NULL; | 61 | return NULL; |
61 | } | 62 | } |
63 | static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void) | ||
64 | { | ||
65 | return NULL; | ||
66 | } | ||
62 | #endif | 67 | #endif |
63 | 68 | ||
64 | int mvebu_mbus_save_cpu_target(u32 *store_addr); | 69 | int mvebu_mbus_save_cpu_target(u32 *store_addr); |
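Editor's note: the new helper has the same calling convention as mv_mbus_dram_info(), but the returned window list is expected to have overlapping ranges trimmed. A hedged consumer sketch; the per-window setup helper is hypothetical and device specific:

#include <linux/mbus.h>

static void example_program_window(const struct mbus_dram_window *win)
{
	/* program one DRAM decoding window for this peripheral */
}

static void example_setup_dram_windows(void)
{
	const struct mbus_dram_target_info *dram =
		mv_mbus_dram_info_nooverlap();
	int i;

	if (!dram)
		return;		/* the stub returns NULL without PLAT_ORION */

	for (i = 0; i < dram->num_cs; i++)
		example_program_window(&dram->cs[i]);
}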
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index d7a974d5f57c..6e7d5ec65838 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h | |||
@@ -1,4 +1,4 @@ | |||
1 | /* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. | 1 | /* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. |
2 | * Copyright (C) 2015 Linaro Ltd. | 2 | * Copyright (C) 2015 Linaro Ltd. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
@@ -16,6 +16,17 @@ | |||
16 | extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); | 16 | extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); |
17 | extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); | 17 | extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); |
18 | 18 | ||
19 | #define QCOM_SCM_HDCP_MAX_REQ_CNT 5 | ||
20 | |||
21 | struct qcom_scm_hdcp_req { | ||
22 | u32 addr; | ||
23 | u32 val; | ||
24 | }; | ||
25 | |||
26 | extern bool qcom_scm_hdcp_available(void); | ||
27 | extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, | ||
28 | u32 *resp); | ||
29 | |||
19 | #define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 | 30 | #define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 |
20 | #define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 | 31 | #define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 |
21 | 32 | ||
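Editor's note: the header only declares the HDCP interface; a hedged sketch of how a caller would be expected to use it, with illustrative names:

#include <linux/kernel.h>
#include <linux/qcom_scm.h>

static int example_hdcp_write_reg(u32 addr, u32 val)
{
	struct qcom_scm_hdcp_req req[] = {
		{ .addr = addr, .val = val },
	};
	u32 resp = 0;

	if (!qcom_scm_hdcp_available())
		return -ENODEV;

	/* At most QCOM_SCM_HDCP_MAX_REQ_CNT (5) register writes per call */
	return qcom_scm_hdcp_req(req, ARRAY_SIZE(req), &resp);
}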
diff --git a/include/linux/soc/sunxi/sunxi_sram.h b/include/linux/soc/sunxi/sunxi_sram.h new file mode 100644 index 000000000000..c5f663bba9c2 --- /dev/null +++ b/include/linux/soc/sunxi/sunxi_sram.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * Allwinner SoCs SRAM Controller Driver | ||
3 | * | ||
4 | * Copyright (C) 2015 Maxime Ripard | ||
5 | * | ||
6 | * Author: Maxime Ripard <maxime.ripard@free-electrons.com> | ||
7 | * | ||
8 | * This file is licensed under the terms of the GNU General Public | ||
9 | * License version 2. This program is licensed "as is" without any | ||
10 | * warranty of any kind, whether express or implied. | ||
11 | */ | ||
12 | |||
13 | #ifndef _SUNXI_SRAM_H_ | ||
14 | #define _SUNXI_SRAM_H_ | ||
15 | |||
16 | int sunxi_sram_claim(struct device *dev); | ||
17 | int sunxi_sram_release(struct device *dev); | ||
18 | |||
19 | #endif /* _SUNXI_SRAM_H_ */ | ||
diff --git a/include/soc/tegra/emc.h b/include/soc/tegra/emc.h new file mode 100644 index 000000000000..f6db33b579ec --- /dev/null +++ b/include/soc/tegra/emc.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014 NVIDIA Corporation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #ifndef __SOC_TEGRA_EMC_H__ | ||
10 | #define __SOC_TEGRA_EMC_H__ | ||
11 | |||
12 | struct tegra_emc; | ||
13 | |||
14 | int tegra_emc_prepare_timing_change(struct tegra_emc *emc, | ||
15 | unsigned long rate); | ||
16 | void tegra_emc_complete_timing_change(struct tegra_emc *emc, | ||
17 | unsigned long rate); | ||
18 | |||
19 | #endif /* __SOC_TEGRA_EMC_H__ */ | ||
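Editor's note: a hedged sketch of the intended pairing of the two hooks around an EMC rate change; the real caller sits in the clock/EMC scaling code and the names here are illustrative only:

#include <linux/clk.h>
#include <soc/tegra/emc.h>

static int example_emc_set_rate(struct tegra_emc *emc, struct clk *emc_clk,
				unsigned long rate)
{
	int err;

	err = tegra_emc_prepare_timing_change(emc, rate);	/* stage new timing */
	if (err)
		return err;

	err = clk_set_rate(emc_clk, rate);			/* switch the clock */
	if (err)
		return err;

	tegra_emc_complete_timing_change(emc, rate);		/* latch and clean up */

	return 0;
}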
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h index b5f7b5f8d008..b019e3465f11 100644 --- a/include/soc/tegra/fuse.h +++ b/include/soc/tegra/fuse.h | |||
@@ -56,6 +56,7 @@ struct tegra_sku_info { | |||
56 | }; | 56 | }; |
57 | 57 | ||
58 | u32 tegra_read_straps(void); | 58 | u32 tegra_read_straps(void); |
59 | u32 tegra_read_ram_code(void); | ||
59 | u32 tegra_read_chipid(void); | 60 | u32 tegra_read_chipid(void); |
60 | int tegra_fuse_readl(unsigned long offset, u32 *value); | 61 | int tegra_fuse_readl(unsigned long offset, u32 *value); |
61 | 62 | ||
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h index 63deb8d9f82a..1ab2813273cd 100644 --- a/include/soc/tegra/mc.h +++ b/include/soc/tegra/mc.h | |||
@@ -20,6 +20,12 @@ struct tegra_smmu_enable { | |||
20 | unsigned int bit; | 20 | unsigned int bit; |
21 | }; | 21 | }; |
22 | 22 | ||
23 | struct tegra_mc_timing { | ||
24 | unsigned long rate; | ||
25 | |||
26 | u32 *emem_data; | ||
27 | }; | ||
28 | |||
23 | /* latency allowance */ | 29 | /* latency allowance */ |
24 | struct tegra_mc_la { | 30 | struct tegra_mc_la { |
25 | unsigned int reg; | 31 | unsigned int reg; |
@@ -40,6 +46,7 @@ struct tegra_mc_client { | |||
40 | }; | 46 | }; |
41 | 47 | ||
42 | struct tegra_smmu_swgroup { | 48 | struct tegra_smmu_swgroup { |
49 | const char *name; | ||
43 | unsigned int swgroup; | 50 | unsigned int swgroup; |
44 | unsigned int reg; | 51 | unsigned int reg; |
45 | }; | 52 | }; |
@@ -71,6 +78,7 @@ struct tegra_smmu; | |||
71 | struct tegra_smmu *tegra_smmu_probe(struct device *dev, | 78 | struct tegra_smmu *tegra_smmu_probe(struct device *dev, |
72 | const struct tegra_smmu_soc *soc, | 79 | const struct tegra_smmu_soc *soc, |
73 | struct tegra_mc *mc); | 80 | struct tegra_mc *mc); |
81 | void tegra_smmu_remove(struct tegra_smmu *smmu); | ||
74 | #else | 82 | #else |
75 | static inline struct tegra_smmu * | 83 | static inline struct tegra_smmu * |
76 | tegra_smmu_probe(struct device *dev, const struct tegra_smmu_soc *soc, | 84 | tegra_smmu_probe(struct device *dev, const struct tegra_smmu_soc *soc, |
@@ -78,13 +86,17 @@ tegra_smmu_probe(struct device *dev, const struct tegra_smmu_soc *soc, | |||
78 | { | 86 | { |
79 | return NULL; | 87 | return NULL; |
80 | } | 88 | } |
89 | |||
90 | static inline void tegra_smmu_remove(struct tegra_smmu *smmu) | ||
91 | { | ||
92 | } | ||
81 | #endif | 93 | #endif |
82 | 94 | ||
83 | struct tegra_mc_soc { | 95 | struct tegra_mc_soc { |
84 | const struct tegra_mc_client *clients; | 96 | const struct tegra_mc_client *clients; |
85 | unsigned int num_clients; | 97 | unsigned int num_clients; |
86 | 98 | ||
87 | const unsigned int *emem_regs; | 99 | const unsigned long *emem_regs; |
88 | unsigned int num_emem_regs; | 100 | unsigned int num_emem_regs; |
89 | 101 | ||
90 | unsigned int num_address_bits; | 102 | unsigned int num_address_bits; |
@@ -102,6 +114,12 @@ struct tegra_mc { | |||
102 | 114 | ||
103 | const struct tegra_mc_soc *soc; | 115 | const struct tegra_mc_soc *soc; |
104 | unsigned long tick; | 116 | unsigned long tick; |
117 | |||
118 | struct tegra_mc_timing *timings; | ||
119 | unsigned int num_timings; | ||
105 | }; | 120 | }; |
106 | 121 | ||
122 | void tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate); | ||
123 | unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc); | ||
124 | |||
107 | #endif /* __SOC_TEGRA_MC_H__ */ | 125 | #endif /* __SOC_TEGRA_MC_H__ */ |
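Editor's note: a hedged sketch of how the EMC scaling path is expected to drive the two new MC entry points; the MC driver matches "rate" against its tegra_mc_timing table and programs the corresponding emem_data registers. Names below are illustrative:

#include <linux/kernel.h>
#include <soc/tegra/mc.h>

static void example_mc_apply_timing(struct tegra_mc *mc, unsigned long rate)
{
	/* e.g. used by the EMC driver to pick single- vs dual-device tables */
	unsigned int devices = tegra_mc_get_emem_device_count(mc);

	pr_debug("MC: %u DRAM device(s), switching to %lu Hz timings\n",
		 devices, rate);

	/* Writes the emem_data registers of the timing matching "rate" */
	tegra_mc_write_emem_configuration(mc, rate);
}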