diff options
Diffstat (limited to 'drivers')
54 files changed, 4370 insertions, 349 deletions
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c index 1f44e56cc65d..558a239954e8 100644 --- a/drivers/amba/tegra-ahb.c +++ b/drivers/amba/tegra-ahb.c | |||
@@ -256,8 +256,6 @@ static int tegra_ahb_probe(struct platform_device *pdev) | |||
256 | return -ENOMEM; | 256 | return -ENOMEM; |
257 | 257 | ||
258 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 258 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
259 | if (!res) | ||
260 | return -ENODEV; | ||
261 | ahb->regs = devm_ioremap_resource(&pdev->dev, res); | 259 | ahb->regs = devm_ioremap_resource(&pdev->dev, res); |
262 | if (IS_ERR(ahb->regs)) | 260 | if (IS_ERR(ahb->regs)) |
263 | return PTR_ERR(ahb->regs); | 261 | return PTR_ERR(ahb->regs); |
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index 962fd35cbd8d..5a86da97a70b 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c | |||
@@ -31,7 +31,6 @@ | |||
31 | 31 | ||
32 | #define DRIVER_NAME "CCI-400" | 32 | #define DRIVER_NAME "CCI-400" |
33 | #define DRIVER_NAME_PMU DRIVER_NAME " PMU" | 33 | #define DRIVER_NAME_PMU DRIVER_NAME " PMU" |
34 | #define PMU_NAME "CCI_400" | ||
35 | 34 | ||
36 | #define CCI_PORT_CTRL 0x0 | 35 | #define CCI_PORT_CTRL 0x0 |
37 | #define CCI_CTRL_STATUS 0xc | 36 | #define CCI_CTRL_STATUS 0xc |
@@ -88,8 +87,7 @@ static unsigned long cci_ctrl_phys; | |||
88 | 87 | ||
89 | #define CCI_REV_R0 0 | 88 | #define CCI_REV_R0 0 |
90 | #define CCI_REV_R1 1 | 89 | #define CCI_REV_R1 1 |
91 | #define CCI_REV_R0_P4 4 | 90 | #define CCI_REV_R1_PX 5 |
92 | #define CCI_REV_R1_P2 6 | ||
93 | 91 | ||
94 | #define CCI_PMU_EVT_SEL 0x000 | 92 | #define CCI_PMU_EVT_SEL 0x000 |
95 | #define CCI_PMU_CNTR 0x004 | 93 | #define CCI_PMU_CNTR 0x004 |
@@ -163,6 +161,15 @@ static struct pmu_port_event_ranges port_event_range[] = { | |||
163 | }, | 161 | }, |
164 | }; | 162 | }; |
165 | 163 | ||
164 | /* | ||
165 | * Export different PMU names for the different revisions so userspace knows | ||
166 | * because the event ids are different | ||
167 | */ | ||
168 | static char *const pmu_names[] = { | ||
169 | [CCI_REV_R0] = "CCI_400", | ||
170 | [CCI_REV_R1] = "CCI_400_r1", | ||
171 | }; | ||
172 | |||
166 | struct cci_pmu_drv_data { | 173 | struct cci_pmu_drv_data { |
167 | void __iomem *base; | 174 | void __iomem *base; |
168 | struct arm_pmu *cci_pmu; | 175 | struct arm_pmu *cci_pmu; |
@@ -193,21 +200,16 @@ static int probe_cci_revision(void) | |||
193 | rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK; | 200 | rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK; |
194 | rev >>= CCI_PID2_REV_SHIFT; | 201 | rev >>= CCI_PID2_REV_SHIFT; |
195 | 202 | ||
196 | if (rev <= CCI_REV_R0_P4) | 203 | if (rev < CCI_REV_R1_PX) |
197 | return CCI_REV_R0; | 204 | return CCI_REV_R0; |
198 | else if (rev <= CCI_REV_R1_P2) | 205 | else |
199 | return CCI_REV_R1; | 206 | return CCI_REV_R1; |
200 | |||
201 | return -ENOENT; | ||
202 | } | 207 | } |
203 | 208 | ||
204 | static struct pmu_port_event_ranges *port_range_by_rev(void) | 209 | static struct pmu_port_event_ranges *port_range_by_rev(void) |
205 | { | 210 | { |
206 | int rev = probe_cci_revision(); | 211 | int rev = probe_cci_revision(); |
207 | 212 | ||
208 | if (rev < 0) | ||
209 | return NULL; | ||
210 | |||
211 | return &port_event_range[rev]; | 213 | return &port_event_range[rev]; |
212 | } | 214 | } |
213 | 215 | ||
@@ -526,7 +528,7 @@ static void pmu_write_counter(struct perf_event *event, u32 value) | |||
526 | static int cci_pmu_init(struct arm_pmu *cci_pmu, struct platform_device *pdev) | 528 | static int cci_pmu_init(struct arm_pmu *cci_pmu, struct platform_device *pdev) |
527 | { | 529 | { |
528 | *cci_pmu = (struct arm_pmu){ | 530 | *cci_pmu = (struct arm_pmu){ |
529 | .name = PMU_NAME, | 531 | .name = pmu_names[probe_cci_revision()], |
530 | .max_period = (1LLU << 32) - 1, | 532 | .max_period = (1LLU << 32) - 1, |
531 | .get_hw_events = pmu_get_hw_events, | 533 | .get_hw_events = pmu_get_hw_events, |
532 | .get_event_idx = pmu_get_event_idx, | 534 | .get_event_idx = pmu_get_event_idx, |
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index 2ac754e18bcf..293e2e0a0a87 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c | |||
@@ -890,13 +890,12 @@ int __init mvebu_mbus_dt_init(void) | |||
890 | const __be32 *prop; | 890 | const __be32 *prop; |
891 | int ret; | 891 | int ret; |
892 | 892 | ||
893 | np = of_find_matching_node(NULL, of_mvebu_mbus_ids); | 893 | np = of_find_matching_node_and_match(NULL, of_mvebu_mbus_ids, &of_id); |
894 | if (!np) { | 894 | if (!np) { |
895 | pr_err("could not find a matching SoC family\n"); | 895 | pr_err("could not find a matching SoC family\n"); |
896 | return -ENODEV; | 896 | return -ENODEV; |
897 | } | 897 | } |
898 | 898 | ||
899 | of_id = of_match_node(of_mvebu_mbus_ids, np); | ||
900 | mbus_state.soc = of_id->data; | 899 | mbus_state.soc = of_id->data; |
901 | 900 | ||
902 | prop = of_get_property(np, "controller", NULL); | 901 | prop = of_get_property(np, "controller", NULL); |
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 2f2b08457c67..244759bbd7b7 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig | |||
@@ -342,11 +342,11 @@ config HW_RANDOM_TPM | |||
342 | If unsure, say Y. | 342 | If unsure, say Y. |
343 | 343 | ||
344 | config HW_RANDOM_MSM | 344 | config HW_RANDOM_MSM |
345 | tristate "Qualcomm MSM Random Number Generator support" | 345 | tristate "Qualcomm SoCs Random Number Generator support" |
346 | depends on HW_RANDOM && ARCH_MSM | 346 | depends on HW_RANDOM && ARCH_QCOM |
347 | ---help--- | 347 | ---help--- |
348 | This driver provides kernel-side support for the Random Number | 348 | This driver provides kernel-side support for the Random Number |
349 | Generator hardware found on Qualcomm MSM SoCs. | 349 | Generator hardware found on Qualcomm SoCs. |
350 | 350 | ||
351 | To compile this driver as a module, choose M here. the | 351 | To compile this driver as a module, choose M here. the |
352 | module will be called msm-rng. | 352 | module will be called msm-rng. |
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 7641965d208d..f9f605695e40 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig | |||
@@ -111,4 +111,5 @@ source "drivers/clk/qcom/Kconfig" | |||
111 | 111 | ||
112 | endmenu | 112 | endmenu |
113 | 113 | ||
114 | source "drivers/clk/bcm/Kconfig" | ||
114 | source "drivers/clk/mvebu/Kconfig" | 115 | source "drivers/clk/mvebu/Kconfig" |
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index a367a9831717..88af4a399d6c 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile | |||
@@ -29,6 +29,7 @@ obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o | |||
29 | obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o | 29 | obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o |
30 | obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o | 30 | obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o |
31 | obj-$(CONFIG_COMMON_CLK_AT91) += at91/ | 31 | obj-$(CONFIG_COMMON_CLK_AT91) += at91/ |
32 | obj-$(CONFIG_ARCH_BCM_MOBILE) += bcm/ | ||
32 | obj-$(CONFIG_ARCH_HI3xxx) += hisilicon/ | 33 | obj-$(CONFIG_ARCH_HI3xxx) += hisilicon/ |
33 | obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/ | 34 | obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/ |
34 | ifeq ($(CONFIG_COMMON_CLK), y) | 35 | ifeq ($(CONFIG_COMMON_CLK), y) |
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig new file mode 100644 index 000000000000..a7262fb8ce55 --- /dev/null +++ b/drivers/clk/bcm/Kconfig | |||
@@ -0,0 +1,9 @@ | |||
1 | config CLK_BCM_KONA | ||
2 | bool "Broadcom Kona CCU clock support" | ||
3 | depends on ARCH_BCM_MOBILE | ||
4 | depends on COMMON_CLK | ||
5 | default y | ||
6 | help | ||
7 | Enable common clock framework support for Broadcom SoCs | ||
8 | using "Kona" style clock control units, including those | ||
9 | in the BCM281xx family. | ||
diff --git a/drivers/clk/bcm/Makefile b/drivers/clk/bcm/Makefile new file mode 100644 index 000000000000..cf93359aa862 --- /dev/null +++ b/drivers/clk/bcm/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | obj-$(CONFIG_CLK_BCM_KONA) += clk-kona.o | ||
2 | obj-$(CONFIG_CLK_BCM_KONA) += clk-kona-setup.o | ||
3 | obj-$(CONFIG_CLK_BCM_KONA) += clk-bcm281xx.o | ||
diff --git a/drivers/clk/bcm/clk-bcm281xx.c b/drivers/clk/bcm/clk-bcm281xx.c new file mode 100644 index 000000000000..3c66de696aeb --- /dev/null +++ b/drivers/clk/bcm/clk-bcm281xx.c | |||
@@ -0,0 +1,416 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Broadcom Corporation | ||
3 | * Copyright 2013 Linaro Limited | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation version 2. | ||
8 | * | ||
9 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
10 | * kind, whether express or implied; without even the implied warranty | ||
11 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | */ | ||
14 | |||
15 | #include "clk-kona.h" | ||
16 | #include "dt-bindings/clock/bcm281xx.h" | ||
17 | |||
18 | /* bcm11351 CCU device tree "compatible" strings */ | ||
19 | #define BCM11351_DT_ROOT_CCU_COMPAT "brcm,bcm11351-root-ccu" | ||
20 | #define BCM11351_DT_AON_CCU_COMPAT "brcm,bcm11351-aon-ccu" | ||
21 | #define BCM11351_DT_HUB_CCU_COMPAT "brcm,bcm11351-hub-ccu" | ||
22 | #define BCM11351_DT_MASTER_CCU_COMPAT "brcm,bcm11351-master-ccu" | ||
23 | #define BCM11351_DT_SLAVE_CCU_COMPAT "brcm,bcm11351-slave-ccu" | ||
24 | |||
25 | /* Root CCU clocks */ | ||
26 | |||
27 | static struct peri_clk_data frac_1m_data = { | ||
28 | .gate = HW_SW_GATE(0x214, 16, 0, 1), | ||
29 | .trig = TRIGGER(0x0e04, 0), | ||
30 | .div = FRAC_DIVIDER(0x0e00, 0, 22, 16), | ||
31 | .clocks = CLOCKS("ref_crystal"), | ||
32 | }; | ||
33 | |||
34 | /* AON CCU clocks */ | ||
35 | |||
36 | static struct peri_clk_data hub_timer_data = { | ||
37 | .gate = HW_SW_GATE(0x0414, 16, 0, 1), | ||
38 | .clocks = CLOCKS("bbl_32k", | ||
39 | "frac_1m", | ||
40 | "dft_19_5m"), | ||
41 | .sel = SELECTOR(0x0a10, 0, 2), | ||
42 | .trig = TRIGGER(0x0a40, 4), | ||
43 | }; | ||
44 | |||
45 | static struct peri_clk_data pmu_bsc_data = { | ||
46 | .gate = HW_SW_GATE(0x0418, 16, 0, 1), | ||
47 | .clocks = CLOCKS("ref_crystal", | ||
48 | "pmu_bsc_var", | ||
49 | "bbl_32k"), | ||
50 | .sel = SELECTOR(0x0a04, 0, 2), | ||
51 | .div = DIVIDER(0x0a04, 3, 4), | ||
52 | .trig = TRIGGER(0x0a40, 0), | ||
53 | }; | ||
54 | |||
55 | static struct peri_clk_data pmu_bsc_var_data = { | ||
56 | .clocks = CLOCKS("var_312m", | ||
57 | "ref_312m"), | ||
58 | .sel = SELECTOR(0x0a00, 0, 2), | ||
59 | .div = DIVIDER(0x0a00, 4, 5), | ||
60 | .trig = TRIGGER(0x0a40, 2), | ||
61 | }; | ||
62 | |||
63 | /* Hub CCU clocks */ | ||
64 | |||
65 | static struct peri_clk_data tmon_1m_data = { | ||
66 | .gate = HW_SW_GATE(0x04a4, 18, 2, 3), | ||
67 | .clocks = CLOCKS("ref_crystal", | ||
68 | "frac_1m"), | ||
69 | .sel = SELECTOR(0x0e74, 0, 2), | ||
70 | .trig = TRIGGER(0x0e84, 1), | ||
71 | }; | ||
72 | |||
73 | /* Master CCU clocks */ | ||
74 | |||
75 | static struct peri_clk_data sdio1_data = { | ||
76 | .gate = HW_SW_GATE(0x0358, 18, 2, 3), | ||
77 | .clocks = CLOCKS("ref_crystal", | ||
78 | "var_52m", | ||
79 | "ref_52m", | ||
80 | "var_96m", | ||
81 | "ref_96m"), | ||
82 | .sel = SELECTOR(0x0a28, 0, 3), | ||
83 | .div = DIVIDER(0x0a28, 4, 14), | ||
84 | .trig = TRIGGER(0x0afc, 9), | ||
85 | }; | ||
86 | |||
87 | static struct peri_clk_data sdio2_data = { | ||
88 | .gate = HW_SW_GATE(0x035c, 18, 2, 3), | ||
89 | .clocks = CLOCKS("ref_crystal", | ||
90 | "var_52m", | ||
91 | "ref_52m", | ||
92 | "var_96m", | ||
93 | "ref_96m"), | ||
94 | .sel = SELECTOR(0x0a2c, 0, 3), | ||
95 | .div = DIVIDER(0x0a2c, 4, 14), | ||
96 | .trig = TRIGGER(0x0afc, 10), | ||
97 | }; | ||
98 | |||
99 | static struct peri_clk_data sdio3_data = { | ||
100 | .gate = HW_SW_GATE(0x0364, 18, 2, 3), | ||
101 | .clocks = CLOCKS("ref_crystal", | ||
102 | "var_52m", | ||
103 | "ref_52m", | ||
104 | "var_96m", | ||
105 | "ref_96m"), | ||
106 | .sel = SELECTOR(0x0a34, 0, 3), | ||
107 | .div = DIVIDER(0x0a34, 4, 14), | ||
108 | .trig = TRIGGER(0x0afc, 12), | ||
109 | }; | ||
110 | |||
111 | static struct peri_clk_data sdio4_data = { | ||
112 | .gate = HW_SW_GATE(0x0360, 18, 2, 3), | ||
113 | .clocks = CLOCKS("ref_crystal", | ||
114 | "var_52m", | ||
115 | "ref_52m", | ||
116 | "var_96m", | ||
117 | "ref_96m"), | ||
118 | .sel = SELECTOR(0x0a30, 0, 3), | ||
119 | .div = DIVIDER(0x0a30, 4, 14), | ||
120 | .trig = TRIGGER(0x0afc, 11), | ||
121 | }; | ||
122 | |||
123 | static struct peri_clk_data usb_ic_data = { | ||
124 | .gate = HW_SW_GATE(0x0354, 18, 2, 3), | ||
125 | .clocks = CLOCKS("ref_crystal", | ||
126 | "var_96m", | ||
127 | "ref_96m"), | ||
128 | .div = FIXED_DIVIDER(2), | ||
129 | .sel = SELECTOR(0x0a24, 0, 2), | ||
130 | .trig = TRIGGER(0x0afc, 7), | ||
131 | }; | ||
132 | |||
133 | /* also called usbh_48m */ | ||
134 | static struct peri_clk_data hsic2_48m_data = { | ||
135 | .gate = HW_SW_GATE(0x0370, 18, 2, 3), | ||
136 | .clocks = CLOCKS("ref_crystal", | ||
137 | "var_96m", | ||
138 | "ref_96m"), | ||
139 | .sel = SELECTOR(0x0a38, 0, 2), | ||
140 | .div = FIXED_DIVIDER(2), | ||
141 | .trig = TRIGGER(0x0afc, 5), | ||
142 | }; | ||
143 | |||
144 | /* also called usbh_12m */ | ||
145 | static struct peri_clk_data hsic2_12m_data = { | ||
146 | .gate = HW_SW_GATE(0x0370, 20, 4, 5), | ||
147 | .div = DIVIDER(0x0a38, 12, 2), | ||
148 | .clocks = CLOCKS("ref_crystal", | ||
149 | "var_96m", | ||
150 | "ref_96m"), | ||
151 | .pre_div = FIXED_DIVIDER(2), | ||
152 | .sel = SELECTOR(0x0a38, 0, 2), | ||
153 | .trig = TRIGGER(0x0afc, 5), | ||
154 | }; | ||
155 | |||
156 | /* Slave CCU clocks */ | ||
157 | |||
158 | static struct peri_clk_data uartb_data = { | ||
159 | .gate = HW_SW_GATE(0x0400, 18, 2, 3), | ||
160 | .clocks = CLOCKS("ref_crystal", | ||
161 | "var_156m", | ||
162 | "ref_156m"), | ||
163 | .sel = SELECTOR(0x0a10, 0, 2), | ||
164 | .div = FRAC_DIVIDER(0x0a10, 4, 12, 8), | ||
165 | .trig = TRIGGER(0x0afc, 2), | ||
166 | }; | ||
167 | |||
168 | static struct peri_clk_data uartb2_data = { | ||
169 | .gate = HW_SW_GATE(0x0404, 18, 2, 3), | ||
170 | .clocks = CLOCKS("ref_crystal", | ||
171 | "var_156m", | ||
172 | "ref_156m"), | ||
173 | .sel = SELECTOR(0x0a14, 0, 2), | ||
174 | .div = FRAC_DIVIDER(0x0a14, 4, 12, 8), | ||
175 | .trig = TRIGGER(0x0afc, 3), | ||
176 | }; | ||
177 | |||
178 | static struct peri_clk_data uartb3_data = { | ||
179 | .gate = HW_SW_GATE(0x0408, 18, 2, 3), | ||
180 | .clocks = CLOCKS("ref_crystal", | ||
181 | "var_156m", | ||
182 | "ref_156m"), | ||
183 | .sel = SELECTOR(0x0a18, 0, 2), | ||
184 | .div = FRAC_DIVIDER(0x0a18, 4, 12, 8), | ||
185 | .trig = TRIGGER(0x0afc, 4), | ||
186 | }; | ||
187 | |||
188 | static struct peri_clk_data uartb4_data = { | ||
189 | .gate = HW_SW_GATE(0x0408, 18, 2, 3), | ||
190 | .clocks = CLOCKS("ref_crystal", | ||
191 | "var_156m", | ||
192 | "ref_156m"), | ||
193 | .sel = SELECTOR(0x0a1c, 0, 2), | ||
194 | .div = FRAC_DIVIDER(0x0a1c, 4, 12, 8), | ||
195 | .trig = TRIGGER(0x0afc, 5), | ||
196 | }; | ||
197 | |||
198 | static struct peri_clk_data ssp0_data = { | ||
199 | .gate = HW_SW_GATE(0x0410, 18, 2, 3), | ||
200 | .clocks = CLOCKS("ref_crystal", | ||
201 | "var_104m", | ||
202 | "ref_104m", | ||
203 | "var_96m", | ||
204 | "ref_96m"), | ||
205 | .sel = SELECTOR(0x0a20, 0, 3), | ||
206 | .div = DIVIDER(0x0a20, 4, 14), | ||
207 | .trig = TRIGGER(0x0afc, 6), | ||
208 | }; | ||
209 | |||
210 | static struct peri_clk_data ssp2_data = { | ||
211 | .gate = HW_SW_GATE(0x0418, 18, 2, 3), | ||
212 | .clocks = CLOCKS("ref_crystal", | ||
213 | "var_104m", | ||
214 | "ref_104m", | ||
215 | "var_96m", | ||
216 | "ref_96m"), | ||
217 | .sel = SELECTOR(0x0a28, 0, 3), | ||
218 | .div = DIVIDER(0x0a28, 4, 14), | ||
219 | .trig = TRIGGER(0x0afc, 8), | ||
220 | }; | ||
221 | |||
222 | static struct peri_clk_data bsc1_data = { | ||
223 | .gate = HW_SW_GATE(0x0458, 18, 2, 3), | ||
224 | .clocks = CLOCKS("ref_crystal", | ||
225 | "var_104m", | ||
226 | "ref_104m", | ||
227 | "var_13m", | ||
228 | "ref_13m"), | ||
229 | .sel = SELECTOR(0x0a64, 0, 3), | ||
230 | .trig = TRIGGER(0x0afc, 23), | ||
231 | }; | ||
232 | |||
233 | static struct peri_clk_data bsc2_data = { | ||
234 | .gate = HW_SW_GATE(0x045c, 18, 2, 3), | ||
235 | .clocks = CLOCKS("ref_crystal", | ||
236 | "var_104m", | ||
237 | "ref_104m", | ||
238 | "var_13m", | ||
239 | "ref_13m"), | ||
240 | .sel = SELECTOR(0x0a68, 0, 3), | ||
241 | .trig = TRIGGER(0x0afc, 24), | ||
242 | }; | ||
243 | |||
244 | static struct peri_clk_data bsc3_data = { | ||
245 | .gate = HW_SW_GATE(0x0484, 18, 2, 3), | ||
246 | .clocks = CLOCKS("ref_crystal", | ||
247 | "var_104m", | ||
248 | "ref_104m", | ||
249 | "var_13m", | ||
250 | "ref_13m"), | ||
251 | .sel = SELECTOR(0x0a84, 0, 3), | ||
252 | .trig = TRIGGER(0x0b00, 2), | ||
253 | }; | ||
254 | |||
255 | static struct peri_clk_data pwm_data = { | ||
256 | .gate = HW_SW_GATE(0x0468, 18, 2, 3), | ||
257 | .clocks = CLOCKS("ref_crystal", | ||
258 | "var_104m"), | ||
259 | .sel = SELECTOR(0x0a70, 0, 2), | ||
260 | .div = DIVIDER(0x0a70, 4, 3), | ||
261 | .trig = TRIGGER(0x0afc, 15), | ||
262 | }; | ||
263 | |||
264 | /* | ||
265 | * CCU setup routines | ||
266 | * | ||
267 | * These are called from kona_dt_ccu_setup() to initialize the array | ||
268 | * of clocks provided by the CCU. Once allocated, the entries in | ||
269 | * the array are initialized by calling kona_clk_setup() with the | ||
270 | * initialization data for each clock. They return 0 if successful | ||
271 | * or an error code otherwise. | ||
272 | */ | ||
273 | static int __init bcm281xx_root_ccu_clks_setup(struct ccu_data *ccu) | ||
274 | { | ||
275 | struct clk **clks; | ||
276 | size_t count = BCM281XX_ROOT_CCU_CLOCK_COUNT; | ||
277 | |||
278 | clks = kzalloc(count * sizeof(*clks), GFP_KERNEL); | ||
279 | if (!clks) { | ||
280 | pr_err("%s: failed to allocate root clocks\n", __func__); | ||
281 | return -ENOMEM; | ||
282 | } | ||
283 | ccu->data.clks = clks; | ||
284 | ccu->data.clk_num = count; | ||
285 | |||
286 | PERI_CLK_SETUP(clks, ccu, BCM281XX_ROOT_CCU_FRAC_1M, frac_1m); | ||
287 | |||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | static int __init bcm281xx_aon_ccu_clks_setup(struct ccu_data *ccu) | ||
292 | { | ||
293 | struct clk **clks; | ||
294 | size_t count = BCM281XX_AON_CCU_CLOCK_COUNT; | ||
295 | |||
296 | clks = kzalloc(count * sizeof(*clks), GFP_KERNEL); | ||
297 | if (!clks) { | ||
298 | pr_err("%s: failed to allocate aon clocks\n", __func__); | ||
299 | return -ENOMEM; | ||
300 | } | ||
301 | ccu->data.clks = clks; | ||
302 | ccu->data.clk_num = count; | ||
303 | |||
304 | PERI_CLK_SETUP(clks, ccu, BCM281XX_AON_CCU_HUB_TIMER, hub_timer); | ||
305 | PERI_CLK_SETUP(clks, ccu, BCM281XX_AON_CCU_PMU_BSC, pmu_bsc); | ||
306 | PERI_CLK_SETUP(clks, ccu, BCM281XX_AON_CCU_PMU_BSC_VAR, pmu_bsc_var); | ||
307 | |||
308 | return 0; | ||
309 | } | ||
310 | |||
311 | static int __init bcm281xx_hub_ccu_clks_setup(struct ccu_data *ccu) | ||
312 | { | ||
313 | struct clk **clks; | ||
314 | size_t count = BCM281XX_HUB_CCU_CLOCK_COUNT; | ||
315 | |||
316 | clks = kzalloc(count * sizeof(*clks), GFP_KERNEL); | ||
317 | if (!clks) { | ||
318 | pr_err("%s: failed to allocate hub clocks\n", __func__); | ||
319 | return -ENOMEM; | ||
320 | } | ||
321 | ccu->data.clks = clks; | ||
322 | ccu->data.clk_num = count; | ||
323 | |||
324 | PERI_CLK_SETUP(clks, ccu, BCM281XX_HUB_CCU_TMON_1M, tmon_1m); | ||
325 | |||
326 | return 0; | ||
327 | } | ||
328 | |||
329 | static int __init bcm281xx_master_ccu_clks_setup(struct ccu_data *ccu) | ||
330 | { | ||
331 | struct clk **clks; | ||
332 | size_t count = BCM281XX_MASTER_CCU_CLOCK_COUNT; | ||
333 | |||
334 | clks = kzalloc(count * sizeof(*clks), GFP_KERNEL); | ||
335 | if (!clks) { | ||
336 | pr_err("%s: failed to allocate master clocks\n", __func__); | ||
337 | return -ENOMEM; | ||
338 | } | ||
339 | ccu->data.clks = clks; | ||
340 | ccu->data.clk_num = count; | ||
341 | |||
342 | PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO1, sdio1); | ||
343 | PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO2, sdio2); | ||
344 | PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO3, sdio3); | ||
345 | PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO4, sdio4); | ||
346 | PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_USB_IC, usb_ic); | ||
347 | PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_HSIC2_48M, hsic2_48m); | ||
348 | PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_HSIC2_12M, hsic2_12m); | ||
349 | |||
350 | return 0; | ||
351 | } | ||
352 | |||
353 | static int __init bcm281xx_slave_ccu_clks_setup(struct ccu_data *ccu) | ||
354 | { | ||
355 | struct clk **clks; | ||
356 | size_t count = BCM281XX_SLAVE_CCU_CLOCK_COUNT; | ||
357 | |||
358 | clks = kzalloc(count * sizeof(*clks), GFP_KERNEL); | ||
359 | if (!clks) { | ||
360 | pr_err("%s: failed to allocate slave clocks\n", __func__); | ||
361 | return -ENOMEM; | ||
362 | } | ||
363 | ccu->data.clks = clks; | ||
364 | ccu->data.clk_num = count; | ||
365 | |||
366 | PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB, uartb); | ||
367 | PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB2, uartb2); | ||
368 | PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB3, uartb3); | ||
369 | PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB4, uartb4); | ||
370 | PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_SSP0, ssp0); | ||
371 | PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_SSP2, ssp2); | ||
372 | PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_BSC1, bsc1); | ||
373 | PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_BSC2, bsc2); | ||
374 | PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_BSC3, bsc3); | ||
375 | PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_PWM, pwm); | ||
376 | |||
377 | return 0; | ||
378 | } | ||
379 | |||
380 | /* Device tree match table callback functions */ | ||
381 | |||
382 | static void __init kona_dt_root_ccu_setup(struct device_node *node) | ||
383 | { | ||
384 | kona_dt_ccu_setup(node, bcm281xx_root_ccu_clks_setup); | ||
385 | } | ||
386 | |||
387 | static void __init kona_dt_aon_ccu_setup(struct device_node *node) | ||
388 | { | ||
389 | kona_dt_ccu_setup(node, bcm281xx_aon_ccu_clks_setup); | ||
390 | } | ||
391 | |||
392 | static void __init kona_dt_hub_ccu_setup(struct device_node *node) | ||
393 | { | ||
394 | kona_dt_ccu_setup(node, bcm281xx_hub_ccu_clks_setup); | ||
395 | } | ||
396 | |||
397 | static void __init kona_dt_master_ccu_setup(struct device_node *node) | ||
398 | { | ||
399 | kona_dt_ccu_setup(node, bcm281xx_master_ccu_clks_setup); | ||
400 | } | ||
401 | |||
402 | static void __init kona_dt_slave_ccu_setup(struct device_node *node) | ||
403 | { | ||
404 | kona_dt_ccu_setup(node, bcm281xx_slave_ccu_clks_setup); | ||
405 | } | ||
406 | |||
407 | CLK_OF_DECLARE(bcm11351_root_ccu, BCM11351_DT_ROOT_CCU_COMPAT, | ||
408 | kona_dt_root_ccu_setup); | ||
409 | CLK_OF_DECLARE(bcm11351_aon_ccu, BCM11351_DT_AON_CCU_COMPAT, | ||
410 | kona_dt_aon_ccu_setup); | ||
411 | CLK_OF_DECLARE(bcm11351_hub_ccu, BCM11351_DT_HUB_CCU_COMPAT, | ||
412 | kona_dt_hub_ccu_setup); | ||
413 | CLK_OF_DECLARE(bcm11351_master_ccu, BCM11351_DT_MASTER_CCU_COMPAT, | ||
414 | kona_dt_master_ccu_setup); | ||
415 | CLK_OF_DECLARE(bcm11351_slave_ccu, BCM11351_DT_SLAVE_CCU_COMPAT, | ||
416 | kona_dt_slave_ccu_setup); | ||
diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c new file mode 100644 index 000000000000..c7607feb18dd --- /dev/null +++ b/drivers/clk/bcm/clk-kona-setup.c | |||
@@ -0,0 +1,769 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Broadcom Corporation | ||
3 | * Copyright 2013 Linaro Limited | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation version 2. | ||
8 | * | ||
9 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
10 | * kind, whether express or implied; without even the implied warranty | ||
11 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/io.h> | ||
16 | #include <linux/of_address.h> | ||
17 | |||
18 | #include "clk-kona.h" | ||
19 | |||
20 | /* These are used when a selector or trigger is found to be unneeded */ | ||
21 | #define selector_clear_exists(sel) ((sel)->width = 0) | ||
22 | #define trigger_clear_exists(trig) FLAG_CLEAR(trig, TRIG, EXISTS) | ||
23 | |||
24 | LIST_HEAD(ccu_list); /* The list of set up CCUs */ | ||
25 | |||
26 | /* Validity checking */ | ||
27 | |||
28 | static bool clk_requires_trigger(struct kona_clk *bcm_clk) | ||
29 | { | ||
30 | struct peri_clk_data *peri = bcm_clk->peri; | ||
31 | struct bcm_clk_sel *sel; | ||
32 | struct bcm_clk_div *div; | ||
33 | |||
34 | if (bcm_clk->type != bcm_clk_peri) | ||
35 | return false; | ||
36 | |||
37 | sel = &peri->sel; | ||
38 | if (sel->parent_count && selector_exists(sel)) | ||
39 | return true; | ||
40 | |||
41 | div = &peri->div; | ||
42 | if (!divider_exists(div)) | ||
43 | return false; | ||
44 | |||
45 | /* Fixed dividers don't need triggers */ | ||
46 | if (!divider_is_fixed(div)) | ||
47 | return true; | ||
48 | |||
49 | div = &peri->pre_div; | ||
50 | |||
51 | return divider_exists(div) && !divider_is_fixed(div); | ||
52 | } | ||
53 | |||
54 | static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk) | ||
55 | { | ||
56 | struct peri_clk_data *peri; | ||
57 | struct bcm_clk_gate *gate; | ||
58 | struct bcm_clk_div *div; | ||
59 | struct bcm_clk_sel *sel; | ||
60 | struct bcm_clk_trig *trig; | ||
61 | const char *name; | ||
62 | u32 range; | ||
63 | u32 limit; | ||
64 | |||
65 | BUG_ON(bcm_clk->type != bcm_clk_peri); | ||
66 | peri = bcm_clk->peri; | ||
67 | name = bcm_clk->name; | ||
68 | range = bcm_clk->ccu->range; | ||
69 | |||
70 | limit = range - sizeof(u32); | ||
71 | limit = round_down(limit, sizeof(u32)); | ||
72 | |||
73 | gate = &peri->gate; | ||
74 | if (gate_exists(gate)) { | ||
75 | if (gate->offset > limit) { | ||
76 | pr_err("%s: bad gate offset for %s (%u > %u)\n", | ||
77 | __func__, name, gate->offset, limit); | ||
78 | return false; | ||
79 | } | ||
80 | } | ||
81 | |||
82 | div = &peri->div; | ||
83 | if (divider_exists(div)) { | ||
84 | if (div->offset > limit) { | ||
85 | pr_err("%s: bad divider offset for %s (%u > %u)\n", | ||
86 | __func__, name, div->offset, limit); | ||
87 | return false; | ||
88 | } | ||
89 | } | ||
90 | |||
91 | div = &peri->pre_div; | ||
92 | if (divider_exists(div)) { | ||
93 | if (div->offset > limit) { | ||
94 | pr_err("%s: bad pre-divider offset for %s " | ||
95 | "(%u > %u)\n", | ||
96 | __func__, name, div->offset, limit); | ||
97 | return false; | ||
98 | } | ||
99 | } | ||
100 | |||
101 | sel = &peri->sel; | ||
102 | if (selector_exists(sel)) { | ||
103 | if (sel->offset > limit) { | ||
104 | pr_err("%s: bad selector offset for %s (%u > %u)\n", | ||
105 | __func__, name, sel->offset, limit); | ||
106 | return false; | ||
107 | } | ||
108 | } | ||
109 | |||
110 | trig = &peri->trig; | ||
111 | if (trigger_exists(trig)) { | ||
112 | if (trig->offset > limit) { | ||
113 | pr_err("%s: bad trigger offset for %s (%u > %u)\n", | ||
114 | __func__, name, trig->offset, limit); | ||
115 | return false; | ||
116 | } | ||
117 | } | ||
118 | |||
119 | trig = &peri->pre_trig; | ||
120 | if (trigger_exists(trig)) { | ||
121 | if (trig->offset > limit) { | ||
122 | pr_err("%s: bad pre-trigger offset for %s (%u > %u)\n", | ||
123 | __func__, name, trig->offset, limit); | ||
124 | return false; | ||
125 | } | ||
126 | } | ||
127 | |||
128 | return true; | ||
129 | } | ||
130 | |||
131 | /* A bit position must be less than the number of bits in a 32-bit register. */ | ||
132 | static bool bit_posn_valid(u32 bit_posn, const char *field_name, | ||
133 | const char *clock_name) | ||
134 | { | ||
135 | u32 limit = BITS_PER_BYTE * sizeof(u32) - 1; | ||
136 | |||
137 | if (bit_posn > limit) { | ||
138 | pr_err("%s: bad %s bit for %s (%u > %u)\n", __func__, | ||
139 | field_name, clock_name, bit_posn, limit); | ||
140 | return false; | ||
141 | } | ||
142 | return true; | ||
143 | } | ||
144 | |||
145 | /* | ||
146 | * A bitfield must be at least 1 bit wide. Both the low-order and | ||
147 | * high-order bits must lie within a 32-bit register. We require | ||
148 | * fields to be less than 32 bits wide, mainly because we use | ||
149 | * shifting to produce field masks, and shifting a full word width | ||
150 | * is not well-defined by the C standard. | ||
151 | */ | ||
152 | static bool bitfield_valid(u32 shift, u32 width, const char *field_name, | ||
153 | const char *clock_name) | ||
154 | { | ||
155 | u32 limit = BITS_PER_BYTE * sizeof(u32); | ||
156 | |||
157 | if (!width) { | ||
158 | pr_err("%s: bad %s field width 0 for %s\n", __func__, | ||
159 | field_name, clock_name); | ||
160 | return false; | ||
161 | } | ||
162 | if (shift + width > limit) { | ||
163 | pr_err("%s: bad %s for %s (%u + %u > %u)\n", __func__, | ||
164 | field_name, clock_name, shift, width, limit); | ||
165 | return false; | ||
166 | } | ||
167 | return true; | ||
168 | } | ||
169 | |||
170 | /* | ||
171 | * All gates, if defined, have a status bit, and for hardware-only | ||
172 | * gates, that's it. Gates that can be software controlled also | ||
173 | * have an enable bit. And a gate that can be hardware or software | ||
174 | * controlled will have a hardware/software select bit. | ||
175 | */ | ||
176 | static bool gate_valid(struct bcm_clk_gate *gate, const char *field_name, | ||
177 | const char *clock_name) | ||
178 | { | ||
179 | if (!bit_posn_valid(gate->status_bit, "gate status", clock_name)) | ||
180 | return false; | ||
181 | |||
182 | if (gate_is_sw_controllable(gate)) { | ||
183 | if (!bit_posn_valid(gate->en_bit, "gate enable", clock_name)) | ||
184 | return false; | ||
185 | |||
186 | if (gate_is_hw_controllable(gate)) { | ||
187 | if (!bit_posn_valid(gate->hw_sw_sel_bit, | ||
188 | "gate hw/sw select", | ||
189 | clock_name)) | ||
190 | return false; | ||
191 | } | ||
192 | } else { | ||
193 | BUG_ON(!gate_is_hw_controllable(gate)); | ||
194 | } | ||
195 | |||
196 | return true; | ||
197 | } | ||
198 | |||
199 | /* | ||
200 | * A selector bitfield must be valid. Its parent_sel array must | ||
201 | * also be reasonable for the field. | ||
202 | */ | ||
203 | static bool sel_valid(struct bcm_clk_sel *sel, const char *field_name, | ||
204 | const char *clock_name) | ||
205 | { | ||
206 | if (!bitfield_valid(sel->shift, sel->width, field_name, clock_name)) | ||
207 | return false; | ||
208 | |||
209 | if (sel->parent_count) { | ||
210 | u32 max_sel; | ||
211 | u32 limit; | ||
212 | |||
213 | /* | ||
214 | * Make sure the selector field can hold all the | ||
215 | * selector values we expect to be able to use. A | ||
216 | * clock only needs to have a selector defined if it | ||
217 | * has more than one parent. And in that case the | ||
218 | * highest selector value will be in the last entry | ||
219 | * in the array. | ||
220 | */ | ||
221 | max_sel = sel->parent_sel[sel->parent_count - 1]; | ||
222 | limit = (1 << sel->width) - 1; | ||
223 | if (max_sel > limit) { | ||
224 | pr_err("%s: bad selector for %s " | ||
225 | "(%u needs > %u bits)\n", | ||
226 | __func__, clock_name, max_sel, | ||
227 | sel->width); | ||
228 | return false; | ||
229 | } | ||
230 | } else { | ||
231 | pr_warn("%s: ignoring selector for %s (no parents)\n", | ||
232 | __func__, clock_name); | ||
233 | selector_clear_exists(sel); | ||
234 | kfree(sel->parent_sel); | ||
235 | sel->parent_sel = NULL; | ||
236 | } | ||
237 | |||
238 | return true; | ||
239 | } | ||
240 | |||
241 | /* | ||
242 | * A fixed divider just needs to be non-zero. A variable divider | ||
243 | * has to have a valid divider bitfield, and if it has a fraction, | ||
244 | * the width of the fraction must not be no more than the width of | ||
245 | * the divider as a whole. | ||
246 | */ | ||
247 | static bool div_valid(struct bcm_clk_div *div, const char *field_name, | ||
248 | const char *clock_name) | ||
249 | { | ||
250 | if (divider_is_fixed(div)) { | ||
251 | /* Any fixed divider value but 0 is OK */ | ||
252 | if (div->fixed == 0) { | ||
253 | pr_err("%s: bad %s fixed value 0 for %s\n", __func__, | ||
254 | field_name, clock_name); | ||
255 | return false; | ||
256 | } | ||
257 | return true; | ||
258 | } | ||
259 | if (!bitfield_valid(div->shift, div->width, field_name, clock_name)) | ||
260 | return false; | ||
261 | |||
262 | if (divider_has_fraction(div)) | ||
263 | if (div->frac_width > div->width) { | ||
264 | pr_warn("%s: bad %s fraction width for %s (%u > %u)\n", | ||
265 | __func__, field_name, clock_name, | ||
266 | div->frac_width, div->width); | ||
267 | return false; | ||
268 | } | ||
269 | |||
270 | return true; | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * If a clock has two dividers, the combined number of fractional | ||
275 | * bits must be representable in a 32-bit unsigned value. This | ||
276 | * is because we scale up a dividend using both dividers before | ||
277 | * dividing to improve accuracy, and we need to avoid overflow. | ||
278 | */ | ||
279 | static bool kona_dividers_valid(struct kona_clk *bcm_clk) | ||
280 | { | ||
281 | struct peri_clk_data *peri = bcm_clk->peri; | ||
282 | struct bcm_clk_div *div; | ||
283 | struct bcm_clk_div *pre_div; | ||
284 | u32 limit; | ||
285 | |||
286 | BUG_ON(bcm_clk->type != bcm_clk_peri); | ||
287 | |||
288 | if (!divider_exists(&peri->div) || !divider_exists(&peri->pre_div)) | ||
289 | return true; | ||
290 | |||
291 | div = &peri->div; | ||
292 | pre_div = &peri->pre_div; | ||
293 | if (divider_is_fixed(div) || divider_is_fixed(pre_div)) | ||
294 | return true; | ||
295 | |||
296 | limit = BITS_PER_BYTE * sizeof(u32); | ||
297 | |||
298 | return div->frac_width + pre_div->frac_width <= limit; | ||
299 | } | ||
300 | |||
301 | |||
/*
 * A trigger just needs to represent a valid bit position.
 * (Any error is reported by bit_posn_valid() using field_name
 * and clock_name.)
 */
static bool trig_valid(struct bcm_clk_trig *trig, const char *field_name,
			const char *clock_name)
{
	return bit_posn_valid(trig->bit, field_name, clock_name);
}
308 | |||
309 | /* Determine whether the set of peripheral clock registers are valid. */ | ||
310 | static bool | ||
311 | peri_clk_data_valid(struct kona_clk *bcm_clk) | ||
312 | { | ||
313 | struct peri_clk_data *peri; | ||
314 | struct bcm_clk_gate *gate; | ||
315 | struct bcm_clk_sel *sel; | ||
316 | struct bcm_clk_div *div; | ||
317 | struct bcm_clk_div *pre_div; | ||
318 | struct bcm_clk_trig *trig; | ||
319 | const char *name; | ||
320 | |||
321 | BUG_ON(bcm_clk->type != bcm_clk_peri); | ||
322 | |||
323 | /* | ||
324 | * First validate register offsets. This is the only place | ||
325 | * where we need something from the ccu, so we do these | ||
326 | * together. | ||
327 | */ | ||
328 | if (!peri_clk_data_offsets_valid(bcm_clk)) | ||
329 | return false; | ||
330 | |||
331 | peri = bcm_clk->peri; | ||
332 | name = bcm_clk->name; | ||
333 | gate = &peri->gate; | ||
334 | if (gate_exists(gate) && !gate_valid(gate, "gate", name)) | ||
335 | return false; | ||
336 | |||
337 | sel = &peri->sel; | ||
338 | if (selector_exists(sel)) { | ||
339 | if (!sel_valid(sel, "selector", name)) | ||
340 | return false; | ||
341 | |||
342 | } else if (sel->parent_count > 1) { | ||
343 | pr_err("%s: multiple parents but no selector for %s\n", | ||
344 | __func__, name); | ||
345 | |||
346 | return false; | ||
347 | } | ||
348 | |||
349 | div = &peri->div; | ||
350 | pre_div = &peri->pre_div; | ||
351 | if (divider_exists(div)) { | ||
352 | if (!div_valid(div, "divider", name)) | ||
353 | return false; | ||
354 | |||
355 | if (divider_exists(pre_div)) | ||
356 | if (!div_valid(pre_div, "pre-divider", name)) | ||
357 | return false; | ||
358 | } else if (divider_exists(pre_div)) { | ||
359 | pr_err("%s: pre-divider but no divider for %s\n", __func__, | ||
360 | name); | ||
361 | return false; | ||
362 | } | ||
363 | |||
364 | trig = &peri->trig; | ||
365 | if (trigger_exists(trig)) { | ||
366 | if (!trig_valid(trig, "trigger", name)) | ||
367 | return false; | ||
368 | |||
369 | if (trigger_exists(&peri->pre_trig)) { | ||
370 | if (!trig_valid(trig, "pre-trigger", name)) { | ||
371 | return false; | ||
372 | } | ||
373 | } | ||
374 | if (!clk_requires_trigger(bcm_clk)) { | ||
375 | pr_warn("%s: ignoring trigger for %s (not needed)\n", | ||
376 | __func__, name); | ||
377 | trigger_clear_exists(trig); | ||
378 | } | ||
379 | } else if (trigger_exists(&peri->pre_trig)) { | ||
380 | pr_err("%s: pre-trigger but no trigger for %s\n", __func__, | ||
381 | name); | ||
382 | return false; | ||
383 | } else if (clk_requires_trigger(bcm_clk)) { | ||
384 | pr_err("%s: required trigger missing for %s\n", __func__, | ||
385 | name); | ||
386 | return false; | ||
387 | } | ||
388 | |||
389 | return kona_dividers_valid(bcm_clk); | ||
390 | } | ||
391 | |||
392 | static bool kona_clk_valid(struct kona_clk *bcm_clk) | ||
393 | { | ||
394 | switch (bcm_clk->type) { | ||
395 | case bcm_clk_peri: | ||
396 | if (!peri_clk_data_valid(bcm_clk)) | ||
397 | return false; | ||
398 | break; | ||
399 | default: | ||
400 | pr_err("%s: unrecognized clock type (%d)\n", __func__, | ||
401 | (int)bcm_clk->type); | ||
402 | return false; | ||
403 | } | ||
404 | return true; | ||
405 | } | ||
406 | |||
407 | /* | ||
408 | * Scan an array of parent clock names to determine whether there | ||
409 | * are any entries containing BAD_CLK_NAME. Such entries are | ||
410 | * placeholders for non-supported clocks. Keep track of the | ||
411 | * position of each clock name in the original array. | ||
412 | * | ||
 * Allocates an array of pointers to hold the names of all
414 | * non-null entries in the original array, and returns a pointer to | ||
415 | * that array in *names. This will be used for registering the | ||
416 | * clock with the common clock code. On successful return, | ||
417 | * *count indicates how many entries are in that names array. | ||
418 | * | ||
419 | * If there is more than one entry in the resulting names array, | ||
420 | * another array is allocated to record the parent selector value | ||
421 | * for each (defined) parent clock. This is the value that | ||
422 | * represents this parent clock in the clock's source selector | ||
423 | * register. The position of the clock in the original parent array | ||
424 | * defines that selector value. The number of entries in this array | ||
425 | * is the same as the number of entries in the parent names array. | ||
426 | * | ||
427 | * The array of selector values is returned. If the clock has no | ||
428 | * parents, no selector is required and a null pointer is returned. | ||
429 | * | ||
430 | * Returns a null pointer if the clock names array supplied was | ||
431 | * null. (This is not an error.) | ||
432 | * | ||
433 | * Returns a pointer-coded error if an error occurs. | ||
434 | */ | ||
435 | static u32 *parent_process(const char *clocks[], | ||
436 | u32 *count, const char ***names) | ||
437 | { | ||
438 | static const char **parent_names; | ||
439 | static u32 *parent_sel; | ||
440 | const char **clock; | ||
441 | u32 parent_count; | ||
442 | u32 bad_count = 0; | ||
443 | u32 orig_count; | ||
444 | u32 i; | ||
445 | u32 j; | ||
446 | |||
447 | *count = 0; /* In case of early return */ | ||
448 | *names = NULL; | ||
449 | if (!clocks) | ||
450 | return NULL; | ||
451 | |||
452 | /* | ||
453 | * Count the number of names in the null-terminated array, | ||
454 | * and find out how many of those are actually clock names. | ||
455 | */ | ||
456 | for (clock = clocks; *clock; clock++) | ||
457 | if (*clock == BAD_CLK_NAME) | ||
458 | bad_count++; | ||
459 | orig_count = (u32)(clock - clocks); | ||
460 | parent_count = orig_count - bad_count; | ||
461 | |||
462 | /* If all clocks are unsupported, we treat it as no clock */ | ||
463 | if (!parent_count) | ||
464 | return NULL; | ||
465 | |||
466 | /* Avoid exceeding our parent clock limit */ | ||
467 | if (parent_count > PARENT_COUNT_MAX) { | ||
468 | pr_err("%s: too many parents (%u > %u)\n", __func__, | ||
469 | parent_count, PARENT_COUNT_MAX); | ||
470 | return ERR_PTR(-EINVAL); | ||
471 | } | ||
472 | |||
473 | /* | ||
474 | * There is one parent name for each defined parent clock. | ||
475 | * We also maintain an array containing the selector value | ||
476 | * for each defined clock. If there's only one clock, the | ||
477 | * selector is not required, but we allocate space for the | ||
478 | * array anyway to keep things simple. | ||
479 | */ | ||
480 | parent_names = kmalloc(parent_count * sizeof(parent_names), GFP_KERNEL); | ||
481 | if (!parent_names) { | ||
482 | pr_err("%s: error allocating %u parent names\n", __func__, | ||
483 | parent_count); | ||
484 | return ERR_PTR(-ENOMEM); | ||
485 | } | ||
486 | |||
487 | /* There is at least one parent, so allocate a selector array */ | ||
488 | |||
489 | parent_sel = kmalloc(parent_count * sizeof(*parent_sel), GFP_KERNEL); | ||
490 | if (!parent_sel) { | ||
491 | pr_err("%s: error allocating %u parent selectors\n", __func__, | ||
492 | parent_count); | ||
493 | kfree(parent_names); | ||
494 | |||
495 | return ERR_PTR(-ENOMEM); | ||
496 | } | ||
497 | |||
498 | /* Now fill in the parent names and selector arrays */ | ||
499 | for (i = 0, j = 0; i < orig_count; i++) { | ||
500 | if (clocks[i] != BAD_CLK_NAME) { | ||
501 | parent_names[j] = clocks[i]; | ||
502 | parent_sel[j] = i; | ||
503 | j++; | ||
504 | } | ||
505 | } | ||
506 | *names = parent_names; | ||
507 | *count = parent_count; | ||
508 | |||
509 | return parent_sel; | ||
510 | } | ||
511 | |||
512 | static int | ||
513 | clk_sel_setup(const char **clocks, struct bcm_clk_sel *sel, | ||
514 | struct clk_init_data *init_data) | ||
515 | { | ||
516 | const char **parent_names = NULL; | ||
517 | u32 parent_count = 0; | ||
518 | u32 *parent_sel; | ||
519 | |||
520 | /* | ||
521 | * If a peripheral clock has multiple parents, the value | ||
522 | * used by the hardware to select that parent is represented | ||
523 | * by the parent clock's position in the "clocks" list. Some | ||
524 | * values don't have defined or supported clocks; these will | ||
525 | * have BAD_CLK_NAME entries in the parents[] array. The | ||
526 | * list is terminated by a NULL entry. | ||
527 | * | ||
528 | * We need to supply (only) the names of defined parent | ||
529 | * clocks when registering a clock though, so we use an | ||
530 | * array of parent selector values to map between the | ||
531 | * indexes the common clock code uses and the selector | ||
532 | * values we need. | ||
533 | */ | ||
534 | parent_sel = parent_process(clocks, &parent_count, &parent_names); | ||
535 | if (IS_ERR(parent_sel)) { | ||
536 | int ret = PTR_ERR(parent_sel); | ||
537 | |||
538 | pr_err("%s: error processing parent clocks for %s (%d)\n", | ||
539 | __func__, init_data->name, ret); | ||
540 | |||
541 | return ret; | ||
542 | } | ||
543 | |||
544 | init_data->parent_names = parent_names; | ||
545 | init_data->num_parents = parent_count; | ||
546 | |||
547 | sel->parent_count = parent_count; | ||
548 | sel->parent_sel = parent_sel; | ||
549 | |||
550 | return 0; | ||
551 | } | ||
552 | |||
553 | static void clk_sel_teardown(struct bcm_clk_sel *sel, | ||
554 | struct clk_init_data *init_data) | ||
555 | { | ||
556 | kfree(sel->parent_sel); | ||
557 | sel->parent_sel = NULL; | ||
558 | sel->parent_count = 0; | ||
559 | |||
560 | init_data->num_parents = 0; | ||
561 | kfree(init_data->parent_names); | ||
562 | init_data->parent_names = NULL; | ||
563 | } | ||
564 | |||
565 | static void peri_clk_teardown(struct peri_clk_data *data, | ||
566 | struct clk_init_data *init_data) | ||
567 | { | ||
568 | clk_sel_teardown(&data->sel, init_data); | ||
569 | init_data->ops = NULL; | ||
570 | } | ||
571 | |||
572 | /* | ||
573 | * Caller is responsible for freeing the parent_names[] and | ||
574 | * parent_sel[] arrays in the peripheral clock's "data" structure | ||
575 | * that can be assigned if the clock has one or more parent clocks | ||
576 | * associated with it. | ||
577 | */ | ||
578 | static int peri_clk_setup(struct ccu_data *ccu, struct peri_clk_data *data, | ||
579 | struct clk_init_data *init_data) | ||
580 | { | ||
581 | init_data->ops = &kona_peri_clk_ops; | ||
582 | init_data->flags = CLK_IGNORE_UNUSED; | ||
583 | |||
584 | return clk_sel_setup(data->clocks, &data->sel, init_data); | ||
585 | } | ||
586 | |||
587 | static void bcm_clk_teardown(struct kona_clk *bcm_clk) | ||
588 | { | ||
589 | switch (bcm_clk->type) { | ||
590 | case bcm_clk_peri: | ||
591 | peri_clk_teardown(bcm_clk->data, &bcm_clk->init_data); | ||
592 | break; | ||
593 | default: | ||
594 | break; | ||
595 | } | ||
596 | bcm_clk->data = NULL; | ||
597 | bcm_clk->type = bcm_clk_none; | ||
598 | } | ||
599 | |||
/*
 * Undo kona_clk_setup() for one clock: unregister it from the
 * common clock framework and release its kona-specific data.
 * Does nothing if clk is NULL.
 */
static void kona_clk_teardown(struct clk *clk)
{
	struct clk_hw *hw;
	struct kona_clk *bcm_clk;

	if (!clk)
		return;

	/* Grab the hw pointer first; it locates our kona_clk wrapper */
	hw = __clk_get_hw(clk);
	if (!hw) {
		pr_err("%s: clk %p has null hw pointer\n", __func__, clk);
		return;
	}
	clk_unregister(clk);

	bcm_clk = to_kona_clk(hw);
	bcm_clk_teardown(bcm_clk);
}
618 | |||
/*
 * Allocate, validate, and register one kona clock.
 *
 * "data" is the type-specific clock data (a struct peri_clk_data
 * for bcm_clk_peri).  An unrecognized type fails kona_clk_valid()
 * and the function returns NULL.  Returns the registered struct
 * clk pointer on success, NULL on any failure (errors are logged).
 */
struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name,
			enum bcm_clk_type type, void *data)
{
	struct kona_clk *bcm_clk;
	struct clk_init_data *init_data;
	struct clk *clk = NULL;

	bcm_clk = kzalloc(sizeof(*bcm_clk), GFP_KERNEL);
	if (!bcm_clk) {
		pr_err("%s: failed to allocate bcm_clk for %s\n", __func__,
			name);
		return NULL;
	}
	bcm_clk->ccu = ccu;
	bcm_clk->name = name;

	init_data = &bcm_clk->init_data;
	init_data->name = name;
	switch (type) {
	case bcm_clk_peri:
		if (peri_clk_setup(ccu, data, init_data))
			goto out_free;
		break;
	default:
		/* Unknown type: record no data; validity check will fail */
		data = NULL;
		break;
	}
	bcm_clk->type = type;
	bcm_clk->data = data;

	/* Make sure everything makes sense before we set it up */
	if (!kona_clk_valid(bcm_clk)) {
		pr_err("%s: clock data invalid for %s\n", __func__, name);
		goto out_teardown;
	}

	bcm_clk->hw.init = init_data;
	clk = clk_register(NULL, &bcm_clk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: error registering clock %s (%ld)\n", __func__,
			name, PTR_ERR(clk));
		goto out_teardown;
	}
	BUG_ON(!clk);

	return clk;
out_teardown:
	bcm_clk_teardown(bcm_clk);
out_free:
	kfree(bcm_clk);

	return NULL;
}
672 | |||
673 | static void ccu_clks_teardown(struct ccu_data *ccu) | ||
674 | { | ||
675 | u32 i; | ||
676 | |||
677 | for (i = 0; i < ccu->data.clk_num; i++) | ||
678 | kona_clk_teardown(ccu->data.clks[i]); | ||
679 | kfree(ccu->data.clks); | ||
680 | } | ||
681 | |||
/*
 * Undo kona_dt_ccu_setup().  Safe to call on a partially
 * initialized CCU (or NULL); only what was set up is torn down.
 */
static void kona_ccu_teardown(struct ccu_data *ccu)
{
	if (!ccu)
		return;

	/* If mapping never succeeded, only the name was allocated */
	if (!ccu->base)
		goto done;

	of_clk_del_provider(ccu->node);	/* safe if never added */
	ccu_clks_teardown(ccu);
	list_del(&ccu->links);
	of_node_put(ccu->node);
	iounmap(ccu->base);
done:
	kfree(ccu->name);
	kfree(ccu);
}
699 | |||
700 | /* | ||
701 | * Set up a CCU. Call the provided ccu_clks_setup callback to | ||
702 | * initialize the array of clocks provided by the CCU. | ||
703 | */ | ||
704 | void __init kona_dt_ccu_setup(struct device_node *node, | ||
705 | int (*ccu_clks_setup)(struct ccu_data *)) | ||
706 | { | ||
707 | struct ccu_data *ccu; | ||
708 | struct resource res = { 0 }; | ||
709 | resource_size_t range; | ||
710 | int ret; | ||
711 | |||
712 | ccu = kzalloc(sizeof(*ccu), GFP_KERNEL); | ||
713 | if (ccu) | ||
714 | ccu->name = kstrdup(node->name, GFP_KERNEL); | ||
715 | if (!ccu || !ccu->name) { | ||
716 | pr_err("%s: unable to allocate CCU struct for %s\n", | ||
717 | __func__, node->name); | ||
718 | kfree(ccu); | ||
719 | |||
720 | return; | ||
721 | } | ||
722 | |||
723 | ret = of_address_to_resource(node, 0, &res); | ||
724 | if (ret) { | ||
725 | pr_err("%s: no valid CCU registers found for %s\n", __func__, | ||
726 | node->name); | ||
727 | goto out_err; | ||
728 | } | ||
729 | |||
730 | range = resource_size(&res); | ||
731 | if (range > (resource_size_t)U32_MAX) { | ||
732 | pr_err("%s: address range too large for %s\n", __func__, | ||
733 | node->name); | ||
734 | goto out_err; | ||
735 | } | ||
736 | |||
737 | ccu->range = (u32)range; | ||
738 | ccu->base = ioremap(res.start, ccu->range); | ||
739 | if (!ccu->base) { | ||
740 | pr_err("%s: unable to map CCU registers for %s\n", __func__, | ||
741 | node->name); | ||
742 | goto out_err; | ||
743 | } | ||
744 | |||
745 | spin_lock_init(&ccu->lock); | ||
746 | INIT_LIST_HEAD(&ccu->links); | ||
747 | ccu->node = of_node_get(node); | ||
748 | |||
749 | list_add_tail(&ccu->links, &ccu_list); | ||
750 | |||
751 | /* Set up clocks array (in ccu->data) */ | ||
752 | if (ccu_clks_setup(ccu)) | ||
753 | goto out_err; | ||
754 | |||
755 | ret = of_clk_add_provider(node, of_clk_src_onecell_get, &ccu->data); | ||
756 | if (ret) { | ||
757 | pr_err("%s: error adding ccu %s as provider (%d)\n", __func__, | ||
758 | node->name, ret); | ||
759 | goto out_err; | ||
760 | } | ||
761 | |||
762 | if (!kona_ccu_init(ccu)) | ||
763 | pr_err("Broadcom %s initialization had errors\n", node->name); | ||
764 | |||
765 | return; | ||
766 | out_err: | ||
767 | kona_ccu_teardown(ccu); | ||
768 | pr_err("Broadcom %s setup aborted\n", node->name); | ||
769 | } | ||
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c new file mode 100644 index 000000000000..e3d339e08309 --- /dev/null +++ b/drivers/clk/bcm/clk-kona.c | |||
@@ -0,0 +1,1033 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Broadcom Corporation | ||
3 | * Copyright 2013 Linaro Limited | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation version 2. | ||
8 | * | ||
9 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
10 | * kind, whether express or implied; without even the implied warranty | ||
11 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | */ | ||
14 | |||
15 | #include "clk-kona.h" | ||
16 | |||
17 | #include <linux/delay.h> | ||
18 | |||
19 | #define CCU_ACCESS_PASSWORD 0xA5A500 | ||
20 | #define CLK_GATE_DELAY_LOOP 2000 | ||
21 | |||
22 | /* Bitfield operations */ | ||
23 | |||
24 | /* Produces a mask of set bits covering a range of a 32-bit value */ | ||
25 | static inline u32 bitfield_mask(u32 shift, u32 width) | ||
26 | { | ||
27 | return ((1 << width) - 1) << shift; | ||
28 | } | ||
29 | |||
30 | /* Extract the value of a bitfield found within a given register value */ | ||
31 | static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width) | ||
32 | { | ||
33 | return (reg_val & bitfield_mask(shift, width)) >> shift; | ||
34 | } | ||
35 | |||
36 | /* Replace the value of a bitfield found within a given register value */ | ||
37 | static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val) | ||
38 | { | ||
39 | u32 mask = bitfield_mask(shift, width); | ||
40 | |||
41 | return (reg_val & ~mask) | (val << shift); | ||
42 | } | ||
43 | |||
44 | /* Divider and scaling helpers */ | ||
45 | |||
46 | /* | ||
47 | * Implement DIV_ROUND_CLOSEST() for 64-bit dividend and both values | ||
48 | * unsigned. Note that unlike do_div(), the remainder is discarded | ||
49 | * and the return value is the quotient (not the remainder). | ||
50 | */ | ||
51 | u64 do_div_round_closest(u64 dividend, unsigned long divisor) | ||
52 | { | ||
53 | u64 result; | ||
54 | |||
55 | result = dividend + ((u64)divisor >> 1); | ||
56 | (void)do_div(result, divisor); | ||
57 | |||
58 | return result; | ||
59 | } | ||
60 | |||
/*
 * Convert a divider into the scaled divisor value it represents.
 * The register field holds the divisor offset by 1.0 in scaled
 * form, i.e. reg_div == 0 represents a scaled divisor of exactly
 * (1 << frac_width).
 */
static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
{
	return (u64)reg_div + ((u64)1 << div->frac_width);
}
66 | |||
67 | /* | ||
68 | * Build a scaled divider value as close as possible to the | ||
69 | * given whole part (div_value) and fractional part (expressed | ||
70 | * in billionths). | ||
71 | */ | ||
72 | u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths) | ||
73 | { | ||
74 | u64 combined; | ||
75 | |||
76 | BUG_ON(!div_value); | ||
77 | BUG_ON(billionths >= BILLION); | ||
78 | |||
79 | combined = (u64)div_value * BILLION + billionths; | ||
80 | combined <<= div->frac_width; | ||
81 | |||
82 | return do_div_round_closest(combined, BILLION); | ||
83 | } | ||
84 | |||
85 | /* The scaled minimum divisor representable by a divider */ | ||
86 | static inline u64 | ||
87 | scaled_div_min(struct bcm_clk_div *div) | ||
88 | { | ||
89 | if (divider_is_fixed(div)) | ||
90 | return (u64)div->fixed; | ||
91 | |||
92 | return scaled_div_value(div, 0); | ||
93 | } | ||
94 | |||
95 | /* The scaled maximum divisor representable by a divider */ | ||
96 | u64 scaled_div_max(struct bcm_clk_div *div) | ||
97 | { | ||
98 | u32 reg_div; | ||
99 | |||
100 | if (divider_is_fixed(div)) | ||
101 | return (u64)div->fixed; | ||
102 | |||
103 | reg_div = ((u32)1 << div->width) - 1; | ||
104 | |||
105 | return scaled_div_value(div, reg_div); | ||
106 | } | ||
107 | |||
108 | /* | ||
109 | * Convert a scaled divisor into its divider representation as | ||
110 | * stored in a divider register field. | ||
111 | */ | ||
112 | static inline u32 | ||
113 | divider(struct bcm_clk_div *div, u64 scaled_div) | ||
114 | { | ||
115 | BUG_ON(scaled_div < scaled_div_min(div)); | ||
116 | BUG_ON(scaled_div > scaled_div_max(div)); | ||
117 | |||
118 | return (u32)(scaled_div - ((u64)1 << div->frac_width)); | ||
119 | } | ||
120 | |||
121 | /* Return a rate scaled for use when dividing by a scaled divisor. */ | ||
122 | static inline u64 | ||
123 | scale_rate(struct bcm_clk_div *div, u32 rate) | ||
124 | { | ||
125 | if (divider_is_fixed(div)) | ||
126 | return (u64)rate; | ||
127 | |||
128 | return (u64)rate << div->frac_width; | ||
129 | } | ||
130 | |||
/* CCU access */

/*
 * Read a 32-bit register value from a CCU's address space.
 * reg_offset is relative to the CCU's mapped base.
 */
static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset)
{
	return readl(ccu->base + reg_offset);
}
138 | |||
/*
 * Write a 32-bit register value into a CCU's address space.
 * reg_offset is relative to the CCU's mapped base.
 */
static inline void
__ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val)
{
	writel(reg_val, ccu->base + reg_offset);
}
145 | |||
/*
 * Acquire the CCU spinlock with local interrupts disabled.
 * Returns the saved IRQ flags to be passed back to ccu_unlock().
 */
static inline unsigned long ccu_lock(struct ccu_data *ccu)
{
	unsigned long flags;

	spin_lock_irqsave(&ccu->lock, flags);

	return flags;
}
/* Release the CCU spinlock, restoring the flags ccu_lock() returned. */
static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags)
{
	spin_unlock_irqrestore(&ccu->lock, flags);
}
158 | |||
159 | /* | ||
160 | * Enable/disable write access to CCU protected registers. The | ||
161 | * WR_ACCESS register for all CCUs is at offset 0. | ||
162 | */ | ||
163 | static inline void __ccu_write_enable(struct ccu_data *ccu) | ||
164 | { | ||
165 | if (ccu->write_enabled) { | ||
166 | pr_err("%s: access already enabled for %s\n", __func__, | ||
167 | ccu->name); | ||
168 | return; | ||
169 | } | ||
170 | ccu->write_enabled = true; | ||
171 | __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1); | ||
172 | } | ||
173 | |||
static inline void __ccu_write_disable(struct ccu_data *ccu)
{
	/* Complain (but carry on) if disable requests are unbalanced */
	if (!ccu->write_enabled) {
		pr_err("%s: access wasn't enabled for %s\n", __func__,
			ccu->name);
		return;
	}

	/* Writing the password without the enable bit locks registers */
	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD);
	ccu->write_enabled = false;
}
185 | |||
186 | /* | ||
187 | * Poll a register in a CCU's address space, returning when the | ||
188 | * specified bit in that register's value is set (or clear). Delay | ||
189 | * a microsecond after each read of the register. Returns true if | ||
190 | * successful, or false if we gave up trying. | ||
191 | * | ||
192 | * Caller must ensure the CCU lock is held. | ||
193 | */ | ||
194 | static inline bool | ||
195 | __ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want) | ||
196 | { | ||
197 | unsigned int tries; | ||
198 | u32 bit_mask = 1 << bit; | ||
199 | |||
200 | for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) { | ||
201 | u32 val; | ||
202 | bool bit_val; | ||
203 | |||
204 | val = __ccu_read(ccu, reg_offset); | ||
205 | bit_val = (val & bit_mask) != 0; | ||
206 | if (bit_val == want) | ||
207 | return true; | ||
208 | udelay(1); | ||
209 | } | ||
210 | return false; | ||
211 | } | ||
212 | |||
213 | /* Gate operations */ | ||
214 | |||
215 | /* Determine whether a clock is gated. CCU lock must be held. */ | ||
216 | static bool | ||
217 | __is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate) | ||
218 | { | ||
219 | u32 bit_mask; | ||
220 | u32 reg_val; | ||
221 | |||
222 | /* If there is no gate we can assume it's enabled. */ | ||
223 | if (!gate_exists(gate)) | ||
224 | return true; | ||
225 | |||
226 | bit_mask = 1 << gate->status_bit; | ||
227 | reg_val = __ccu_read(ccu, gate->offset); | ||
228 | |||
229 | return (reg_val & bit_mask) != 0; | ||
230 | } | ||
231 | |||
232 | /* Determine whether a clock is gated. */ | ||
233 | static bool | ||
234 | is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate) | ||
235 | { | ||
236 | long flags; | ||
237 | bool ret; | ||
238 | |||
239 | /* Avoid taking the lock if we can */ | ||
240 | if (!gate_exists(gate)) | ||
241 | return true; | ||
242 | |||
243 | flags = ccu_lock(ccu); | ||
244 | ret = __is_clk_gate_enabled(ccu, gate); | ||
245 | ccu_unlock(ccu, flags); | ||
246 | |||
247 | return ret; | ||
248 | } | ||
249 | |||
250 | /* | ||
251 | * Commit our desired gate state to the hardware. | ||
252 | * Returns true if successful, false otherwise. | ||
253 | */ | ||
254 | static bool | ||
255 | __gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate) | ||
256 | { | ||
257 | u32 reg_val; | ||
258 | u32 mask; | ||
259 | bool enabled = false; | ||
260 | |||
261 | BUG_ON(!gate_exists(gate)); | ||
262 | if (!gate_is_sw_controllable(gate)) | ||
263 | return true; /* Nothing we can change */ | ||
264 | |||
265 | reg_val = __ccu_read(ccu, gate->offset); | ||
266 | |||
267 | /* For a hardware/software gate, set which is in control */ | ||
268 | if (gate_is_hw_controllable(gate)) { | ||
269 | mask = (u32)1 << gate->hw_sw_sel_bit; | ||
270 | if (gate_is_sw_managed(gate)) | ||
271 | reg_val |= mask; | ||
272 | else | ||
273 | reg_val &= ~mask; | ||
274 | } | ||
275 | |||
276 | /* | ||
277 | * If software is in control, enable or disable the gate. | ||
278 | * If hardware is, clear the enabled bit for good measure. | ||
279 | * If a software controlled gate can't be disabled, we're | ||
280 | * required to write a 0 into the enable bit (but the gate | ||
281 | * will be enabled). | ||
282 | */ | ||
283 | mask = (u32)1 << gate->en_bit; | ||
284 | if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) && | ||
285 | !gate_is_no_disable(gate)) | ||
286 | reg_val |= mask; | ||
287 | else | ||
288 | reg_val &= ~mask; | ||
289 | |||
290 | __ccu_write(ccu, gate->offset, reg_val); | ||
291 | |||
292 | /* For a hardware controlled gate, we're done */ | ||
293 | if (!gate_is_sw_managed(gate)) | ||
294 | return true; | ||
295 | |||
296 | /* Otherwise wait for the gate to be in desired state */ | ||
297 | return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled); | ||
298 | } | ||
299 | |||
300 | /* | ||
301 | * Initialize a gate. Our desired state (hardware/software select, | ||
302 | * and if software, its enable state) is committed to hardware | ||
303 | * without the usual checks to see if it's already set up that way. | ||
304 | * Returns true if successful, false otherwise. | ||
305 | */ | ||
306 | static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate) | ||
307 | { | ||
308 | if (!gate_exists(gate)) | ||
309 | return true; | ||
310 | return __gate_commit(ccu, gate); | ||
311 | } | ||
312 | |||
313 | /* | ||
314 | * Set a gate to enabled or disabled state. Does nothing if the | ||
315 | * gate is not currently under software control, or if it is already | ||
316 | * in the requested state. Returns true if successful, false | ||
317 | * otherwise. CCU lock must be held. | ||
318 | */ | ||
319 | static bool | ||
320 | __clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable) | ||
321 | { | ||
322 | bool ret; | ||
323 | |||
324 | if (!gate_exists(gate) || !gate_is_sw_managed(gate)) | ||
325 | return true; /* Nothing to do */ | ||
326 | |||
327 | if (!enable && gate_is_no_disable(gate)) { | ||
328 | pr_warn("%s: invalid gate disable request (ignoring)\n", | ||
329 | __func__); | ||
330 | return true; | ||
331 | } | ||
332 | |||
333 | if (enable == gate_is_enabled(gate)) | ||
334 | return true; /* No change */ | ||
335 | |||
336 | gate_flip_enabled(gate); | ||
337 | ret = __gate_commit(ccu, gate); | ||
338 | if (!ret) | ||
339 | gate_flip_enabled(gate); /* Revert the change */ | ||
340 | |||
341 | return ret; | ||
342 | } | ||
343 | |||
/*
 * Enable or disable a gate.  Returns 0 if successful, -EIO otherwise.
 * Takes the CCU lock itself; "name" is used only for error reporting.
 */
static int clk_gate(struct ccu_data *ccu, const char *name,
			struct bcm_clk_gate *gate, bool enable)
{
	unsigned long flags;
	bool success;

	/*
	 * Avoid taking the lock if we can.  We quietly ignore
	 * requests to change state that don't make sense.
	 */
	if (!gate_exists(gate) || !gate_is_sw_managed(gate))
		return 0;
	if (!enable && gate_is_no_disable(gate))
		return 0;

	/* Protected registers require write access enabled under the lock */
	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	success = __clk_gate(ccu, gate, enable);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (success)
		return 0;

	pr_err("%s: failed to %s gate for %s\n", __func__,
		enable ? "enable" : "disable", name);

	return -EIO;
}
376 | |||
/* Trigger operations */

/*
 * Caller must ensure CCU lock is held and access is enabled.
 * Returns true if successful, false otherwise.
 */
static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig)
{
	/* Trigger the clock and wait for it to finish */
	__ccu_write(ccu, trig->offset, 1 << trig->bit);

	/* Completion is signalled by the trigger bit reading back as 0 */
	return __ccu_wait_bit(ccu, trig->offset, trig->bit, false);
}
390 | |||
391 | /* Divider operations */ | ||
392 | |||
393 | /* Read a divider value and return the scaled divisor it represents. */ | ||
394 | static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div) | ||
395 | { | ||
396 | unsigned long flags; | ||
397 | u32 reg_val; | ||
398 | u32 reg_div; | ||
399 | |||
400 | if (divider_is_fixed(div)) | ||
401 | return (u64)div->fixed; | ||
402 | |||
403 | flags = ccu_lock(ccu); | ||
404 | reg_val = __ccu_read(ccu, div->offset); | ||
405 | ccu_unlock(ccu, flags); | ||
406 | |||
407 | /* Extract the full divider field from the register value */ | ||
408 | reg_div = bitfield_extract(reg_val, div->shift, div->width); | ||
409 | |||
410 | /* Return the scaled divisor value it represents */ | ||
411 | return scaled_div_value(div, reg_div); | ||
412 | } | ||
413 | |||
/*
 * Convert a divider's scaled divisor value into its recorded form
 * and commit it into the hardware divider register.
 *
 * Returns 0 on success.  Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 *
 * Caller must hold the CCU lock with register write access enabled;
 * this function writes registers without taking the lock itself.
 */
static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
	bool enabled;
	u32 reg_div;
	u32 reg_val;
	int ret = 0;

	/* A fixed divider has no register field to commit to */
	BUG_ON(divider_is_fixed(div));

	/*
	 * If we're just initializing the divider, and no initial
	 * state was defined in the device tree, we just find out
	 * what its current value is rather than updating it.
	 */
	if (div->scaled_div == BAD_SCALED_DIV_VALUE) {
		reg_val = __ccu_read(ccu, div->offset);
		reg_div = bitfield_extract(reg_val, div->shift, div->width);
		div->scaled_div = scaled_div_value(div, reg_div);

		return 0;
	}

	/* Convert the scaled divisor to the value we need to record */
	reg_div = divider(div, div->scaled_div);

	/* Clock needs to be enabled before changing the rate */
	enabled = __is_clk_gate_enabled(ccu, gate);
	if (!enabled && !__clk_gate(ccu, gate, true)) {
		ret = -ENXIO;
		goto out;
	}

	/* Replace the divider value and record the result */
	reg_val = __ccu_read(ccu, div->offset);
	reg_val = bitfield_replace(reg_val, div->shift, div->width, reg_div);
	__ccu_write(ccu, div->offset, reg_val);

	/* If the trigger fails we still want to disable the gate */
	if (!__clk_trigger(ccu, trig))
		ret = -EIO;

	/* Disable the clock again if it was disabled to begin with */
	if (!enabled && !__clk_gate(ccu, gate, false))
		ret = ret ? ret : -ENXIO;	/* return first error */
out:
	return ret;
}
469 | |||
470 | /* | ||
471 | * Initialize a divider by committing our desired state to hardware | ||
472 | * without the usual checks to see if it's already set up that way. | ||
473 | * Returns true if successful, false otherwise. | ||
474 | */ | ||
475 | static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate, | ||
476 | struct bcm_clk_div *div, struct bcm_clk_trig *trig) | ||
477 | { | ||
478 | if (!divider_exists(div) || divider_is_fixed(div)) | ||
479 | return true; | ||
480 | return !__div_commit(ccu, gate, div, trig); | ||
481 | } | ||
482 | |||
483 | static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, | ||
484 | struct bcm_clk_div *div, struct bcm_clk_trig *trig, | ||
485 | u64 scaled_div) | ||
486 | { | ||
487 | unsigned long flags; | ||
488 | u64 previous; | ||
489 | int ret; | ||
490 | |||
491 | BUG_ON(divider_is_fixed(div)); | ||
492 | |||
493 | previous = div->scaled_div; | ||
494 | if (previous == scaled_div) | ||
495 | return 0; /* No change */ | ||
496 | |||
497 | div->scaled_div = scaled_div; | ||
498 | |||
499 | flags = ccu_lock(ccu); | ||
500 | __ccu_write_enable(ccu); | ||
501 | |||
502 | ret = __div_commit(ccu, gate, div, trig); | ||
503 | |||
504 | __ccu_write_disable(ccu); | ||
505 | ccu_unlock(ccu, flags); | ||
506 | |||
507 | if (ret) | ||
508 | div->scaled_div = previous; /* Revert the change */ | ||
509 | |||
510 | return ret; | ||
511 | |||
512 | } | ||
513 | |||
514 | /* Common clock rate helpers */ | ||
515 | |||
/*
 * Implement the common clock framework recalc_rate method, taking
 * into account a divider and an optional pre-divider.  The
 * pre-divider register pointer may be NULL.
 */
static unsigned long clk_recalc_rate(struct ccu_data *ccu,
			struct bcm_clk_div *div, struct bcm_clk_div *pre_div,
			unsigned long parent_rate)
{
	u64 scaled_parent_rate;
	u64 scaled_div;
	u64 result;

	/* With no divider, the output rate is just the parent rate */
	if (!divider_exists(div))
		return parent_rate;

	if (parent_rate > (unsigned long)LONG_MAX)
		return 0;	/* actually this would be a caller bug */

	/*
	 * If there is a pre-divider, divide the scaled parent rate
	 * by the pre-divider value first.  In this case--to improve
	 * accuracy--scale the parent rate by *both* the pre-divider
	 * value and the divider before actually computing the
	 * result of the pre-divider.
	 *
	 * If there's only one divider, just scale the parent rate.
	 */
	if (pre_div && divider_exists(pre_div)) {
		u64 scaled_rate;

		scaled_rate = scale_rate(pre_div, parent_rate);
		scaled_rate = scale_rate(div, scaled_rate);
		scaled_div = divider_read_scaled(ccu, pre_div);
		scaled_parent_rate = do_div_round_closest(scaled_rate,
							scaled_div);
	} else {
		scaled_parent_rate = scale_rate(div, parent_rate);
	}

	/*
	 * Get the scaled divisor value, and divide the scaled
	 * parent rate by that to determine this clock's resulting
	 * rate.
	 */
	scaled_div = divider_read_scaled(ccu, div);
	result = do_div_round_closest(scaled_parent_rate, scaled_div);

	return (unsigned long)result;
}
566 | |||
/*
 * Compute the output rate produced when a given parent rate is fed
 * into two dividers.  The pre-divider can be NULL, and even if it's
 * non-null it may be nonexistent.  It's also OK for the divider to
 * be nonexistent, and in that case the pre-divider is also ignored.
 *
 * If scaled_div is non-null, it is used to return the scaled divisor
 * value used by the (downstream) divider to produce that rate.
 *
 * NOTE(review): despite the comment above, a NULL pre_div would be
 * dereferenced by divider_exists() below; both callers in this file
 * pass a non-null pointer.  Confirm before passing NULL.
 */
static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
			struct bcm_clk_div *pre_div,
			unsigned long rate, unsigned long parent_rate,
			u64 *scaled_div)
{
	u64 scaled_parent_rate;
	u64 min_scaled_div;
	u64 max_scaled_div;
	u64 best_scaled_div;
	u64 result;

	/* Callers guarantee a real divider, a nonzero target rate,
	 * and a parent rate that fits in a long. */
	BUG_ON(!divider_exists(div));
	BUG_ON(!rate);
	BUG_ON(parent_rate > (u64)LONG_MAX);

	/*
	 * If there is a pre-divider, divide the scaled parent rate
	 * by the pre-divider value first.  In this case--to improve
	 * accuracy--scale the parent rate by *both* the pre-divider
	 * value and the divider before actually computing the
	 * result of the pre-divider.
	 *
	 * If there's only one divider, just scale the parent rate.
	 *
	 * For simplicity we treat the pre-divider as fixed (for now).
	 */
	if (divider_exists(pre_div)) {
		u64 scaled_rate;
		u64 scaled_pre_div;

		scaled_rate = scale_rate(pre_div, parent_rate);
		scaled_rate = scale_rate(div, scaled_rate);
		scaled_pre_div = divider_read_scaled(ccu, pre_div);
		scaled_parent_rate = do_div_round_closest(scaled_rate,
							scaled_pre_div);
	} else {
		scaled_parent_rate = scale_rate(div, parent_rate);
	}

	/*
	 * Compute the best possible divider and ensure it is in
	 * range.  A fixed divider can't be changed, so just report
	 * the best we can do.
	 */
	if (!divider_is_fixed(div)) {
		best_scaled_div = do_div_round_closest(scaled_parent_rate,
							rate);
		min_scaled_div = scaled_div_min(div);
		max_scaled_div = scaled_div_max(div);
		if (best_scaled_div > max_scaled_div)
			best_scaled_div = max_scaled_div;
		else if (best_scaled_div < min_scaled_div)
			best_scaled_div = min_scaled_div;
	} else {
		best_scaled_div = divider_read_scaled(ccu, div);
	}

	/* OK, figure out the resulting rate */
	result = do_div_round_closest(scaled_parent_rate, best_scaled_div);

	if (scaled_div)
		*scaled_div = best_scaled_div;

	return (long)result;
}
641 | |||
642 | /* Common clock parent helpers */ | ||
643 | |||
644 | /* | ||
645 | * For a given parent selector (register field) value, find the | ||
646 | * index into a selector's parent_sel array that contains it. | ||
647 | * Returns the index, or BAD_CLK_INDEX if it's not found. | ||
648 | */ | ||
649 | static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel) | ||
650 | { | ||
651 | u8 i; | ||
652 | |||
653 | BUG_ON(sel->parent_count > (u32)U8_MAX); | ||
654 | for (i = 0; i < sel->parent_count; i++) | ||
655 | if (sel->parent_sel[i] == parent_sel) | ||
656 | return i; | ||
657 | return BAD_CLK_INDEX; | ||
658 | } | ||
659 | |||
660 | /* | ||
661 | * Fetch the current value of the selector, and translate that into | ||
662 | * its corresponding index in the parent array we registered with | ||
663 | * the clock framework. | ||
664 | * | ||
665 | * Returns parent array index that corresponds with the value found, | ||
666 | * or BAD_CLK_INDEX if the found value is out of range. | ||
667 | */ | ||
668 | static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel) | ||
669 | { | ||
670 | unsigned long flags; | ||
671 | u32 reg_val; | ||
672 | u32 parent_sel; | ||
673 | u8 index; | ||
674 | |||
675 | /* If there's no selector, there's only one parent */ | ||
676 | if (!selector_exists(sel)) | ||
677 | return 0; | ||
678 | |||
679 | /* Get the value in the selector register */ | ||
680 | flags = ccu_lock(ccu); | ||
681 | reg_val = __ccu_read(ccu, sel->offset); | ||
682 | ccu_unlock(ccu, flags); | ||
683 | |||
684 | parent_sel = bitfield_extract(reg_val, sel->shift, sel->width); | ||
685 | |||
686 | /* Look up that selector's parent array index and return it */ | ||
687 | index = parent_index(sel, parent_sel); | ||
688 | if (index == BAD_CLK_INDEX) | ||
689 | pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n", | ||
690 | __func__, parent_sel, ccu->name, sel->offset); | ||
691 | |||
692 | return index; | ||
693 | } | ||
694 | |||
/*
 * Commit our desired selector value to the hardware.
 *
 * Returns 0 on success.  Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 *
 * Caller must hold the CCU lock with register write access enabled;
 * this function writes registers without taking the lock itself.
 */
static int
__sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
	u32 parent_sel;
	u32 reg_val;
	bool enabled;
	int ret = 0;

	BUG_ON(!selector_exists(sel));

	/*
	 * If we're just initializing the selector, and no initial
	 * state was defined in the device tree, we just find out
	 * what its current value is rather than updating it.
	 */
	if (sel->clk_index == BAD_CLK_INDEX) {
		u8 index;

		reg_val = __ccu_read(ccu, sel->offset);
		parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
		index = parent_index(sel, parent_sel);
		if (index == BAD_CLK_INDEX)
			return -EINVAL;
		sel->clk_index = index;

		return 0;
	}

	/* Translate the recorded parent index to its register value */
	BUG_ON((u32)sel->clk_index >= sel->parent_count);
	parent_sel = sel->parent_sel[sel->clk_index];

	/* Clock needs to be enabled before changing the parent */
	enabled = __is_clk_gate_enabled(ccu, gate);
	if (!enabled && !__clk_gate(ccu, gate, true))
		return -ENXIO;

	/* Replace the selector value and record the result */
	reg_val = __ccu_read(ccu, sel->offset);
	reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel);
	__ccu_write(ccu, sel->offset, reg_val);

	/* If the trigger fails we still want to disable the gate */
	if (!__clk_trigger(ccu, trig))
		ret = -EIO;

	/* Disable the clock again if it was disabled to begin with */
	if (!enabled && !__clk_gate(ccu, gate, false))
		ret = ret ? ret : -ENXIO;	/* return first error */

	return ret;
}
753 | |||
754 | /* | ||
755 | * Initialize a selector by committing our desired state to hardware | ||
756 | * without the usual checks to see if it's already set up that way. | ||
757 | * Returns true if successful, false otherwise. | ||
758 | */ | ||
759 | static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate, | ||
760 | struct bcm_clk_sel *sel, struct bcm_clk_trig *trig) | ||
761 | { | ||
762 | if (!selector_exists(sel)) | ||
763 | return true; | ||
764 | return !__sel_commit(ccu, gate, sel, trig); | ||
765 | } | ||
766 | |||
767 | /* | ||
768 | * Write a new value into a selector register to switch to a | ||
769 | * different parent clock. Returns 0 on success, or an error code | ||
770 | * (from __sel_commit()) otherwise. | ||
771 | */ | ||
772 | static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, | ||
773 | struct bcm_clk_sel *sel, struct bcm_clk_trig *trig, | ||
774 | u8 index) | ||
775 | { | ||
776 | unsigned long flags; | ||
777 | u8 previous; | ||
778 | int ret; | ||
779 | |||
780 | previous = sel->clk_index; | ||
781 | if (previous == index) | ||
782 | return 0; /* No change */ | ||
783 | |||
784 | sel->clk_index = index; | ||
785 | |||
786 | flags = ccu_lock(ccu); | ||
787 | __ccu_write_enable(ccu); | ||
788 | |||
789 | ret = __sel_commit(ccu, gate, sel, trig); | ||
790 | |||
791 | __ccu_write_disable(ccu); | ||
792 | ccu_unlock(ccu, flags); | ||
793 | |||
794 | if (ret) | ||
795 | sel->clk_index = previous; /* Revert the change */ | ||
796 | |||
797 | return ret; | ||
798 | } | ||
799 | |||
800 | /* Clock operations */ | ||
801 | |||
802 | static int kona_peri_clk_enable(struct clk_hw *hw) | ||
803 | { | ||
804 | struct kona_clk *bcm_clk = to_kona_clk(hw); | ||
805 | struct bcm_clk_gate *gate = &bcm_clk->peri->gate; | ||
806 | |||
807 | return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true); | ||
808 | } | ||
809 | |||
810 | static void kona_peri_clk_disable(struct clk_hw *hw) | ||
811 | { | ||
812 | struct kona_clk *bcm_clk = to_kona_clk(hw); | ||
813 | struct bcm_clk_gate *gate = &bcm_clk->peri->gate; | ||
814 | |||
815 | (void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false); | ||
816 | } | ||
817 | |||
818 | static int kona_peri_clk_is_enabled(struct clk_hw *hw) | ||
819 | { | ||
820 | struct kona_clk *bcm_clk = to_kona_clk(hw); | ||
821 | struct bcm_clk_gate *gate = &bcm_clk->peri->gate; | ||
822 | |||
823 | return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0; | ||
824 | } | ||
825 | |||
826 | static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw, | ||
827 | unsigned long parent_rate) | ||
828 | { | ||
829 | struct kona_clk *bcm_clk = to_kona_clk(hw); | ||
830 | struct peri_clk_data *data = bcm_clk->peri; | ||
831 | |||
832 | return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div, | ||
833 | parent_rate); | ||
834 | } | ||
835 | |||
836 | static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate, | ||
837 | unsigned long *parent_rate) | ||
838 | { | ||
839 | struct kona_clk *bcm_clk = to_kona_clk(hw); | ||
840 | struct bcm_clk_div *div = &bcm_clk->peri->div; | ||
841 | |||
842 | if (!divider_exists(div)) | ||
843 | return __clk_get_rate(hw->clk); | ||
844 | |||
845 | /* Quietly avoid a zero rate */ | ||
846 | return round_rate(bcm_clk->ccu, div, &bcm_clk->peri->pre_div, | ||
847 | rate ? rate : 1, *parent_rate, NULL); | ||
848 | } | ||
849 | |||
850 | static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index) | ||
851 | { | ||
852 | struct kona_clk *bcm_clk = to_kona_clk(hw); | ||
853 | struct peri_clk_data *data = bcm_clk->peri; | ||
854 | struct bcm_clk_sel *sel = &data->sel; | ||
855 | struct bcm_clk_trig *trig; | ||
856 | int ret; | ||
857 | |||
858 | BUG_ON(index >= sel->parent_count); | ||
859 | |||
860 | /* If there's only one parent we don't require a selector */ | ||
861 | if (!selector_exists(sel)) | ||
862 | return 0; | ||
863 | |||
864 | /* | ||
865 | * The regular trigger is used by default, but if there's a | ||
866 | * pre-trigger we want to use that instead. | ||
867 | */ | ||
868 | trig = trigger_exists(&data->pre_trig) ? &data->pre_trig | ||
869 | : &data->trig; | ||
870 | |||
871 | ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index); | ||
872 | if (ret == -ENXIO) { | ||
873 | pr_err("%s: gating failure for %s\n", __func__, bcm_clk->name); | ||
874 | ret = -EIO; /* Don't proliferate weird errors */ | ||
875 | } else if (ret == -EIO) { | ||
876 | pr_err("%s: %strigger failed for %s\n", __func__, | ||
877 | trig == &data->pre_trig ? "pre-" : "", | ||
878 | bcm_clk->name); | ||
879 | } | ||
880 | |||
881 | return ret; | ||
882 | } | ||
883 | |||
884 | static u8 kona_peri_clk_get_parent(struct clk_hw *hw) | ||
885 | { | ||
886 | struct kona_clk *bcm_clk = to_kona_clk(hw); | ||
887 | struct peri_clk_data *data = bcm_clk->peri; | ||
888 | u8 index; | ||
889 | |||
890 | index = selector_read_index(bcm_clk->ccu, &data->sel); | ||
891 | |||
892 | /* Not all callers would handle an out-of-range value gracefully */ | ||
893 | return index == BAD_CLK_INDEX ? 0 : index; | ||
894 | } | ||
895 | |||
896 | static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate, | ||
897 | unsigned long parent_rate) | ||
898 | { | ||
899 | struct kona_clk *bcm_clk = to_kona_clk(hw); | ||
900 | struct peri_clk_data *data = bcm_clk->peri; | ||
901 | struct bcm_clk_div *div = &data->div; | ||
902 | u64 scaled_div = 0; | ||
903 | int ret; | ||
904 | |||
905 | if (parent_rate > (unsigned long)LONG_MAX) | ||
906 | return -EINVAL; | ||
907 | |||
908 | if (rate == __clk_get_rate(hw->clk)) | ||
909 | return 0; | ||
910 | |||
911 | if (!divider_exists(div)) | ||
912 | return rate == parent_rate ? 0 : -EINVAL; | ||
913 | |||
914 | /* | ||
915 | * A fixed divider can't be changed. (Nor can a fixed | ||
916 | * pre-divider be, but for now we never actually try to | ||
917 | * change that.) Tolerate a request for a no-op change. | ||
918 | */ | ||
919 | if (divider_is_fixed(&data->div)) | ||
920 | return rate == parent_rate ? 0 : -EINVAL; | ||
921 | |||
922 | /* | ||
923 | * Get the scaled divisor value needed to achieve a clock | ||
924 | * rate as close as possible to what was requested, given | ||
925 | * the parent clock rate supplied. | ||
926 | */ | ||
927 | (void)round_rate(bcm_clk->ccu, div, &data->pre_div, | ||
928 | rate ? rate : 1, parent_rate, &scaled_div); | ||
929 | |||
930 | /* | ||
931 | * We aren't updating any pre-divider at this point, so | ||
932 | * we'll use the regular trigger. | ||
933 | */ | ||
934 | ret = divider_write(bcm_clk->ccu, &data->gate, &data->div, | ||
935 | &data->trig, scaled_div); | ||
936 | if (ret == -ENXIO) { | ||
937 | pr_err("%s: gating failure for %s\n", __func__, bcm_clk->name); | ||
938 | ret = -EIO; /* Don't proliferate weird errors */ | ||
939 | } else if (ret == -EIO) { | ||
940 | pr_err("%s: trigger failed for %s\n", __func__, bcm_clk->name); | ||
941 | } | ||
942 | |||
943 | return ret; | ||
944 | } | ||
945 | |||
/*
 * Common clock framework operations implemented for Kona
 * peripheral clocks by the functions above.
 */
struct clk_ops kona_peri_clk_ops = {
	.enable = kona_peri_clk_enable,
	.disable = kona_peri_clk_disable,
	.is_enabled = kona_peri_clk_is_enabled,
	.recalc_rate = kona_peri_clk_recalc_rate,
	.round_rate = kona_peri_clk_round_rate,
	.set_parent = kona_peri_clk_set_parent,
	.get_parent = kona_peri_clk_get_parent,
	.set_rate = kona_peri_clk_set_rate,
};
956 | |||
/*
 * Put a peripheral clock into its initial state.  Commits the gate,
 * divider, pre-divider, and parent selector state to the hardware,
 * in that order.  Returns true if successful, false otherwise.
 *
 * Caller must hold the CCU lock with register write access enabled
 * (the *_init() helpers commit registers without locking).
 */
static bool __peri_clk_init(struct kona_clk *bcm_clk)
{
	struct ccu_data *ccu = bcm_clk->ccu;
	struct peri_clk_data *peri = bcm_clk->peri;
	const char *name = bcm_clk->name;
	struct bcm_clk_trig *trig;

	BUG_ON(bcm_clk->type != bcm_clk_peri);

	if (!gate_init(ccu, &peri->gate)) {
		pr_err("%s: error initializing gate for %s\n", __func__, name);
		return false;
	}
	if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) {
		pr_err("%s: error initializing divider for %s\n", __func__,
			name);
		return false;
	}

	/*
	 * For the pre-divider and selector, the pre-trigger is used
	 * if it's present, otherwise we just use the regular trigger.
	 */
	trig = trigger_exists(&peri->pre_trig) ? &peri->pre_trig
					       : &peri->trig;

	if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) {
		pr_err("%s: error initializing pre-divider for %s\n", __func__,
			name);
		return false;
	}

	if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) {
		pr_err("%s: error initializing selector for %s\n", __func__,
			name);
		return false;
	}

	return true;
}
998 | |||
999 | static bool __kona_clk_init(struct kona_clk *bcm_clk) | ||
1000 | { | ||
1001 | switch (bcm_clk->type) { | ||
1002 | case bcm_clk_peri: | ||
1003 | return __peri_clk_init(bcm_clk); | ||
1004 | default: | ||
1005 | BUG(); | ||
1006 | } | ||
1007 | return -EINVAL; | ||
1008 | } | ||
1009 | |||
1010 | /* Set a CCU and all its clocks into their desired initial state */ | ||
1011 | bool __init kona_ccu_init(struct ccu_data *ccu) | ||
1012 | { | ||
1013 | unsigned long flags; | ||
1014 | unsigned int which; | ||
1015 | struct clk **clks = ccu->data.clks; | ||
1016 | bool success = true; | ||
1017 | |||
1018 | flags = ccu_lock(ccu); | ||
1019 | __ccu_write_enable(ccu); | ||
1020 | |||
1021 | for (which = 0; which < ccu->data.clk_num; which++) { | ||
1022 | struct kona_clk *bcm_clk; | ||
1023 | |||
1024 | if (!clks[which]) | ||
1025 | continue; | ||
1026 | bcm_clk = to_kona_clk(__clk_get_hw(clks[which])); | ||
1027 | success &= __kona_clk_init(bcm_clk); | ||
1028 | } | ||
1029 | |||
1030 | __ccu_write_disable(ccu); | ||
1031 | ccu_unlock(ccu, flags); | ||
1032 | return success; | ||
1033 | } | ||
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h new file mode 100644 index 000000000000..5e139adc3dc5 --- /dev/null +++ b/drivers/clk/bcm/clk-kona.h | |||
@@ -0,0 +1,410 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Broadcom Corporation | ||
3 | * Copyright 2013 Linaro Limited | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation version 2. | ||
8 | * | ||
9 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
10 | * kind, whether express or implied; without even the implied warranty | ||
11 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _CLK_KONA_H | ||
16 | #define _CLK_KONA_H | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/list.h> | ||
20 | #include <linux/spinlock.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/device.h> | ||
23 | #include <linux/of.h> | ||
24 | #include <linux/clk-provider.h> | ||
25 | |||
#define BILLION		1000000000

/* The common clock framework uses u8 to represent a parent index */
#define PARENT_COUNT_MAX	((u32)U8_MAX)

#define BAD_CLK_INDEX		U8_MAX	/* Can't ever be valid */
#define BAD_CLK_NAME		((const char *)-1)

/* Sentinel scaled-divisor value meaning "not yet initialized" */
#define BAD_SCALED_DIV_VALUE	U64_MAX

/*
 * Utility macros for object flag management.  If possible, flags
 * should be defined such that 0 is the desired default value.
 * "type" selects a flag namespace (e.g. GATE, DIV, TRIG) and
 * "flag" names one bit within it (see BCM_CLK_*_FLAGS_* below).
 */
#define FLAG(type, flag)		BCM_CLK_ ## type ## _FLAGS_ ## flag
#define FLAG_SET(obj, type, flag)	((obj)->flags |= FLAG(type, flag))
#define FLAG_CLEAR(obj, type, flag)	((obj)->flags &= ~(FLAG(type, flag)))
#define FLAG_FLIP(obj, type, flag)	((obj)->flags ^= FLAG(type, flag))
#define FLAG_TEST(obj, type, flag)	(!!((obj)->flags & FLAG(type, flag)))

/* Clock field state tests */

#define gate_exists(gate)		FLAG_TEST(gate, GATE, EXISTS)
#define gate_is_enabled(gate)		FLAG_TEST(gate, GATE, ENABLED)
#define gate_is_hw_controllable(gate)	FLAG_TEST(gate, GATE, HW)
#define gate_is_sw_controllable(gate)	FLAG_TEST(gate, GATE, SW)
#define gate_is_sw_managed(gate)	FLAG_TEST(gate, GATE, SW_MANAGED)
#define gate_is_no_disable(gate)	FLAG_TEST(gate, GATE, NO_DISABLE)

#define gate_flip_enabled(gate)		FLAG_FLIP(gate, GATE, ENABLED)

#define divider_exists(div)		FLAG_TEST(div, DIV, EXISTS)
#define divider_is_fixed(div)		FLAG_TEST(div, DIV, FIXED)
/* Only a variable divider can have a fractional part */
#define divider_has_fraction(div)	(!divider_is_fixed(div) && \
						(div)->frac_width > 0)

#define selector_exists(sel)		((sel)->width != 0)
#define trigger_exists(trig)		FLAG_TEST(trig, TRIG, EXISTS)
64 | |||
/* Clock type, used to tell common block what it's part of */
enum bcm_clk_type {
	bcm_clk_none,	/* undefined clock type */
	bcm_clk_bus,
	bcm_clk_core,
	bcm_clk_peri	/* peripheral clock (only type implemented here) */
};
72 | |||
/*
 * Each CCU defines a mapped area of memory containing registers
 * used to manage clocks implemented by the CCU.  Access to memory
 * within the CCU's space is serialized by a spinlock.  Before any
 * (other) address can be written, a special access "password" value
 * must be written to its WR_ACCESS register (located at the base
 * address of the range).  We keep track of the name of each CCU as
 * it is set up, and maintain them in a list.
 */
struct ccu_data {
	void __iomem *base;	/* base of mapped address space */
	spinlock_t lock;	/* serialization lock */
	bool write_enabled;	/* write access is currently enabled */
	struct list_head links;	/* for ccu_list */
	struct device_node *node;	/* associated device tree node */
	struct clk_onecell_data data;	/* clocks provided by this CCU */
	const char *name;	/* CCU name, used in diagnostics */
	u32 range;		/* byte range of address space */
};
92 | |||
/*
 * Gating control and status is managed by a 32-bit gate register.
 *
 * There are several types of gating available:
 * - (no gate)
 *     A clock with no gate is assumed to be always enabled.
 * - hardware-only gating (auto-gating)
 *     Enabling or disabling clocks with this type of gate is
 *     managed automatically by the hardware.  Such clocks can be
 *     considered by the software to be enabled.  The current status
 *     of auto-gated clocks can be read from the gate status bit.
 * - software-only gating
 *     Auto-gating is not available for this type of clock.
 *     Instead, software manages whether it's enabled by setting or
 *     clearing the enable bit.  The current gate status of a gate
 *     under software control can be read from the gate status bit.
 *     To ensure a change to the gating status is complete, the
 *     status bit can be polled to verify that the gate has entered
 *     the desired state.
 * - selectable hardware or software gating
 *     Gating for this type of clock can be configured to be either
 *     under software or hardware control.  Which type is in use is
 *     determined by the hw_sw_sel bit of the gate register.
 */
struct bcm_clk_gate {
	u32 offset;		/* gate register offset */
	u32 status_bit;		/* 0: gate is disabled; 1: gate is enabled */
	u32 en_bit;		/* 0: disable; 1: enable */
	u32 hw_sw_sel_bit;	/* 0: hardware gating; 1: software gating */
	u32 flags;		/* BCM_CLK_GATE_FLAGS_* below */
};
124 | |||
/*
 * Gate flags:
 *   EXISTS means this gate is valid (the clock actually has a gate)
 *   HW means this gate can be auto-gated
 *   SW means the state of this gate can be software controlled
 *   NO_DISABLE means this gate is (only) enabled if under software control
 *   SW_MANAGED means the status of this gate is under software control
 *   ENABLED means this software-managed gate is *supposed* to be enabled
 */
#define BCM_CLK_GATE_FLAGS_EXISTS	((u32)1 << 0)	/* Gate is valid */
#define BCM_CLK_GATE_FLAGS_HW		((u32)1 << 1)	/* Can auto-gate */
#define BCM_CLK_GATE_FLAGS_SW		((u32)1 << 2)	/* Software control */
#define BCM_CLK_GATE_FLAGS_NO_DISABLE	((u32)1 << 3)	/* HW or enabled */
#define BCM_CLK_GATE_FLAGS_SW_MANAGED	((u32)1 << 4)	/* SW now in control */
#define BCM_CLK_GATE_FLAGS_ENABLED	((u32)1 << 5)	/* If SW_MANAGED */

/*
 * Gate initialization macros.
 *
 * Any gate initially under software control will be enabled.
 */

/* A hardware/software gate initially under software control */
#define HW_SW_GATE(_offset, _status_bit, _en_bit, _hw_sw_sel_bit)	\
	{								\
		.offset = (_offset),					\
		.status_bit = (_status_bit),				\
		.en_bit = (_en_bit),					\
		.hw_sw_sel_bit = (_hw_sw_sel_bit),			\
		.flags = FLAG(GATE, HW)|FLAG(GATE, SW)|			\
			FLAG(GATE, SW_MANAGED)|FLAG(GATE, ENABLED)|	\
			FLAG(GATE, EXISTS),				\
	}

/* A hardware/software gate initially under hardware control */
#define HW_SW_GATE_AUTO(_offset, _status_bit, _en_bit, _hw_sw_sel_bit)	\
	{								\
		.offset = (_offset),					\
		.status_bit = (_status_bit),				\
		.en_bit = (_en_bit),					\
		.hw_sw_sel_bit = (_hw_sw_sel_bit),			\
		.flags = FLAG(GATE, HW)|FLAG(GATE, SW)|			\
			FLAG(GATE, EXISTS),				\
	}

/* A hardware-or-enabled gate (enabled if not under hardware control) */
#define HW_ENABLE_GATE(_offset, _status_bit, _en_bit, _hw_sw_sel_bit)	\
	{								\
		.offset = (_offset),					\
		.status_bit = (_status_bit),				\
		.en_bit = (_en_bit),					\
		.hw_sw_sel_bit = (_hw_sw_sel_bit),			\
		.flags = FLAG(GATE, HW)|FLAG(GATE, SW)|			\
			FLAG(GATE, NO_DISABLE)|FLAG(GATE, EXISTS),	\
	}

/* A software-only gate */
#define SW_ONLY_GATE(_offset, _status_bit, _en_bit)			\
	{								\
		.offset = (_offset),					\
		.status_bit = (_status_bit),				\
		.en_bit = (_en_bit),					\
		.flags = FLAG(GATE, SW)|FLAG(GATE, SW_MANAGED)|		\
			FLAG(GATE, ENABLED)|FLAG(GATE, EXISTS),		\
	}

/* A hardware-only gate */
#define HW_ONLY_GATE(_offset, _status_bit)				\
	{								\
		.offset = (_offset),					\
		.status_bit = (_status_bit),				\
		.flags = FLAG(GATE, HW)|FLAG(GATE, EXISTS),		\
	}
197 | |||
198 | /* | ||
199 | * Each clock can have zero, one, or two dividers which change the | ||
200 | * output rate of the clock. Each divider can be either fixed or | ||
201 | * variable. If there are two dividers, they are the "pre-divider" | ||
202 | * and the "regular" or "downstream" divider. If there is only one, | ||
203 | * there is no pre-divider. | ||
204 | * | ||
205 | * A fixed divider is any non-zero (positive) value, and it | ||
206 | * indicates how the input rate is affected by the divider. | ||
207 | * | ||
208 | * The value of a variable divider is maintained in a sub-field of a | ||
209 | * 32-bit divider register. The position of the field in the | ||
210 | * register is defined by its offset and width. The value recorded | ||
211 | * in this field is always 1 less than the value it represents. | ||
212 | * | ||
213 | * In addition, a variable divider can indicate that some subset | ||
214 | * of its bits represent a "fractional" part of the divider. Such | ||
215 | * bits comprise the low-order portion of the divider field, and can | ||
216 | * be viewed as representing the portion of the divider that lies to | ||
217 | * the right of the decimal point. Most variable dividers have zero | ||
218 | * fractional bits. Variable dividers with non-zero fraction width | ||
219 | * still record a value 1 less than the value they represent; the | ||
220 | * added 1 does *not* affect the low-order bit in this case, it | ||
221 | * affects the bits above the fractional part only. (Often in this | ||
222 | * code a divider field value is distinguished from the value it | ||
223 | * represents by referring to the latter as a "divisor".) | ||
224 | * | ||
225 | * In order to avoid dealing with fractions, divider arithmetic is | ||
226 | * performed using "scaled" values. A scaled value is one that's | ||
227 | * been left-shifted by the fractional width of a divider. Dividing | ||
228 | * a scaled value by a scaled divisor produces the desired quotient | ||
229 | * without loss of precision and without any other special handling | ||
230 | * for fractions. | ||
231 | * | ||
232 | * The recorded value of a variable divider can be modified. To | ||
233 | * modify either divider (or both), a clock must be enabled (i.e., | ||
234 | * using its gate). In addition, a trigger register (described | ||
235 | * below) must be used to commit the change, and polled to verify | ||
236 | * the change is complete. | ||
237 | */ | ||
238 | struct bcm_clk_div { | ||
239 | union { | ||
240 | struct { /* variable divider */ | ||
241 | u32 offset; /* divider register offset */ | ||
242 | u32 shift; /* field shift */ | ||
243 | u32 width; /* field width */ | ||
244 | u32 frac_width; /* field fraction width */ | ||
245 | |||
246 | u64 scaled_div; /* scaled divider value */ | ||
247 | }; | ||
248 | u32 fixed; /* non-zero fixed divider value */ | ||
249 | }; | ||
250 | u32 flags; /* BCM_CLK_DIV_FLAGS_* below */ | ||
251 | }; | ||
252 | |||
253 | /* | ||
254 | * Divider flags: | ||
255 | * EXISTS means this divider exists | ||
256 | * FIXED means it is a fixed-rate divider | ||
257 | */ | ||
258 | #define BCM_CLK_DIV_FLAGS_EXISTS ((u32)1 << 0) /* Divider is valid */ | ||
259 | #define BCM_CLK_DIV_FLAGS_FIXED ((u32)1 << 1) /* Fixed-value */ | ||
260 | |||
261 | /* Divider initialization macros */ | ||
262 | |||
263 | /* A fixed (non-zero) divider */ | ||
264 | #define FIXED_DIVIDER(_value) \ | ||
265 | { \ | ||
266 | .fixed = (_value), \ | ||
267 | .flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED), \ | ||
268 | } | ||
269 | |||
270 | /* A divider with an integral divisor */ | ||
271 | #define DIVIDER(_offset, _shift, _width) \ | ||
272 | { \ | ||
273 | .offset = (_offset), \ | ||
274 | .shift = (_shift), \ | ||
275 | .width = (_width), \ | ||
276 | .scaled_div = BAD_SCALED_DIV_VALUE, \ | ||
277 | .flags = FLAG(DIV, EXISTS), \ | ||
278 | } | ||
279 | |||
280 | /* A divider whose divisor has an integer and fractional part */ | ||
281 | #define FRAC_DIVIDER(_offset, _shift, _width, _frac_width) \ | ||
282 | { \ | ||
283 | .offset = (_offset), \ | ||
284 | .shift = (_shift), \ | ||
285 | .width = (_width), \ | ||
286 | .frac_width = (_frac_width), \ | ||
287 | .scaled_div = BAD_SCALED_DIV_VALUE, \ | ||
288 | .flags = FLAG(DIV, EXISTS), \ | ||
289 | } | ||
290 | |||
291 | /* | ||
292 | * Clocks may have multiple "parent" clocks. If there is more than | ||
293 | * one, a selector must be specified to define which of the parent | ||
294 | * clocks is currently in use. The selected clock is indicated in a | ||
295 | * sub-field of a 32-bit selector register. The range of | ||
296 | * representable selector values typically exceeds the number of | ||
297 | * available parent clocks. Occasionally the reset value of a | ||
298 | * selector field is explicitly set to a (specific) value that does | ||
299 | * not correspond to a defined input clock. | ||
300 | * | ||
301 | * We register all known parent clocks with the common clock code | ||
302 | * using a packed array (i.e., no empty slots) of (parent) clock | ||
303 | * names, and refer to them later using indexes into that array. | ||
304 | * We maintain an array of selector values indexed by common clock | ||
305 | * index values in order to map between these common clock indexes | ||
306 | * and the selector values used by the hardware. | ||
307 | * | ||
308 | * Like dividers, a selector can be modified, but to do so a clock | ||
309 | * must be enabled, and a trigger must be used to commit the change. | ||
310 | */ | ||
311 | struct bcm_clk_sel { | ||
312 | u32 offset; /* selector register offset */ | ||
313 | u32 shift; /* field shift */ | ||
314 | u32 width; /* field width */ | ||
315 | |||
316 | u32 parent_count; /* number of entries in parent_sel[] */ | ||
317 | u32 *parent_sel; /* array of parent selector values */ | ||
318 | u8 clk_index; /* current selected index in parent_sel[] */ | ||
319 | }; | ||
320 | |||
321 | /* Selector initialization macro */ | ||
322 | #define SELECTOR(_offset, _shift, _width) \ | ||
323 | { \ | ||
324 | .offset = (_offset), \ | ||
325 | .shift = (_shift), \ | ||
326 | .width = (_width), \ | ||
327 | .clk_index = BAD_CLK_INDEX, \ | ||
328 | } | ||
329 | |||
330 | /* | ||
331 | * Making changes to a variable divider or a selector for a clock | ||
332 | * requires the use of a trigger. A trigger is defined by a single | ||
333 | * bit within a register. To signal a change, a 1 is written into | ||
334 | * that bit. To determine when the change has been completed, that | ||
335 | * trigger bit is polled; the read value will be 1 while the change | ||
336 | * is in progress, and 0 when it is complete. | ||
337 | * | ||
338 | * Occasionally a clock will have more than one trigger. In this | ||
339 | * case, the "pre-trigger" will be used when changing a clock's | ||
340 | * selector and/or its pre-divider. | ||
341 | */ | ||
342 | struct bcm_clk_trig { | ||
343 | u32 offset; /* trigger register offset */ | ||
344 | u32 bit; /* trigger bit */ | ||
345 | u32 flags; /* BCM_CLK_TRIG_FLAGS_* below */ | ||
346 | }; | ||
347 | |||
348 | /* | ||
349 | * Trigger flags: | ||
350 | * EXISTS means this trigger exists | ||
351 | */ | ||
352 | #define BCM_CLK_TRIG_FLAGS_EXISTS ((u32)1 << 0) /* Trigger is valid */ | ||
353 | |||
354 | /* Trigger initialization macro */ | ||
355 | #define TRIGGER(_offset, _bit) \ | ||
356 | { \ | ||
357 | .offset = (_offset), \ | ||
358 | .bit = (_bit), \ | ||
359 | .flags = FLAG(TRIG, EXISTS), \ | ||
360 | } | ||
361 | |||
362 | struct peri_clk_data { | ||
363 | struct bcm_clk_gate gate; | ||
364 | struct bcm_clk_trig pre_trig; | ||
365 | struct bcm_clk_div pre_div; | ||
366 | struct bcm_clk_trig trig; | ||
367 | struct bcm_clk_div div; | ||
368 | struct bcm_clk_sel sel; | ||
369 | const char *clocks[]; /* must be last; use CLOCKS() to declare */ | ||
370 | }; | ||
371 | #define CLOCKS(...) { __VA_ARGS__, NULL, } | ||
372 | #define NO_CLOCKS { NULL, } /* Must use if there are no parent clocks */ | ||
373 | |||
374 | struct kona_clk { | ||
375 | struct clk_hw hw; | ||
376 | struct clk_init_data init_data; | ||
377 | const char *name; /* name of this clock */ | ||
378 | struct ccu_data *ccu; /* ccu this clock is associated with */ | ||
379 | enum bcm_clk_type type; | ||
380 | union { | ||
381 | void *data; | ||
382 | struct peri_clk_data *peri; | ||
383 | }; | ||
384 | }; | ||
385 | #define to_kona_clk(_hw) \ | ||
386 | container_of(_hw, struct kona_clk, hw) | ||
387 | |||
388 | /* Exported globals */ | ||
389 | |||
390 | extern struct clk_ops kona_peri_clk_ops; | ||
391 | |||
392 | /* Helper functions */ | ||
393 | |||
394 | #define PERI_CLK_SETUP(clks, ccu, id, name) \ | ||
395 | clks[id] = kona_clk_setup(ccu, #name, bcm_clk_peri, &name ## _data) | ||
396 | |||
397 | /* Externally visible functions */ | ||
398 | |||
399 | extern u64 do_div_round_closest(u64 dividend, unsigned long divisor); | ||
400 | extern u64 scaled_div_max(struct bcm_clk_div *div); | ||
401 | extern u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, | ||
402 | u32 billionths); | ||
403 | |||
404 | extern struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name, | ||
405 | enum bcm_clk_type type, void *data); | ||
406 | extern void __init kona_dt_ccu_setup(struct device_node *node, | ||
407 | int (*ccu_clks_setup)(struct ccu_data *)); | ||
408 | extern bool __init kona_ccu_init(struct ccu_data *ccu); | ||
409 | |||
410 | #endif /* _CLK_KONA_H */ | ||
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 010f071af883..b4f967210175 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/clk-provider.h> | 16 | #include <linux/clk-provider.h> |
17 | #include <linux/of.h> | 17 | #include <linux/of.h> |
18 | #include <linux/of_address.h> | 18 | #include <linux/of_address.h> |
19 | #include <linux/syscore_ops.h> | ||
19 | 20 | ||
20 | #include "clk.h" | 21 | #include "clk.h" |
21 | 22 | ||
@@ -130,6 +131,17 @@ enum exynos4_plls { | |||
130 | nr_plls /* number of PLLs */ | 131 | nr_plls /* number of PLLs */ |
131 | }; | 132 | }; |
132 | 133 | ||
134 | static void __iomem *reg_base; | ||
135 | static enum exynos4_soc exynos4_soc; | ||
136 | |||
137 | /* | ||
138 | * Support for CMU save/restore across system suspends | ||
139 | */ | ||
140 | #ifdef CONFIG_PM_SLEEP | ||
141 | static struct samsung_clk_reg_dump *exynos4_save_common; | ||
142 | static struct samsung_clk_reg_dump *exynos4_save_soc; | ||
143 | static struct samsung_clk_reg_dump *exynos4_save_pll; | ||
144 | |||
133 | /* | 145 | /* |
134 | * list of controller registers to be saved and restored during a | 146 | * list of controller registers to be saved and restored during a |
135 | * suspend/resume cycle. | 147 | * suspend/resume cycle. |
@@ -154,6 +166,17 @@ static unsigned long exynos4x12_clk_save[] __initdata = { | |||
154 | E4X12_MPLL_CON0, | 166 | E4X12_MPLL_CON0, |
155 | }; | 167 | }; |
156 | 168 | ||
169 | static unsigned long exynos4_clk_pll_regs[] __initdata = { | ||
170 | EPLL_LOCK, | ||
171 | VPLL_LOCK, | ||
172 | EPLL_CON0, | ||
173 | EPLL_CON1, | ||
174 | EPLL_CON2, | ||
175 | VPLL_CON0, | ||
176 | VPLL_CON1, | ||
177 | VPLL_CON2, | ||
178 | }; | ||
179 | |||
157 | static unsigned long exynos4_clk_regs[] __initdata = { | 180 | static unsigned long exynos4_clk_regs[] __initdata = { |
158 | SRC_LEFTBUS, | 181 | SRC_LEFTBUS, |
159 | DIV_LEFTBUS, | 182 | DIV_LEFTBUS, |
@@ -161,12 +184,6 @@ static unsigned long exynos4_clk_regs[] __initdata = { | |||
161 | SRC_RIGHTBUS, | 184 | SRC_RIGHTBUS, |
162 | DIV_RIGHTBUS, | 185 | DIV_RIGHTBUS, |
163 | GATE_IP_RIGHTBUS, | 186 | GATE_IP_RIGHTBUS, |
164 | EPLL_CON0, | ||
165 | EPLL_CON1, | ||
166 | EPLL_CON2, | ||
167 | VPLL_CON0, | ||
168 | VPLL_CON1, | ||
169 | VPLL_CON2, | ||
170 | SRC_TOP0, | 187 | SRC_TOP0, |
171 | SRC_TOP1, | 188 | SRC_TOP1, |
172 | SRC_CAM, | 189 | SRC_CAM, |
@@ -227,6 +244,124 @@ static unsigned long exynos4_clk_regs[] __initdata = { | |||
227 | GATE_IP_CPU, | 244 | GATE_IP_CPU, |
228 | }; | 245 | }; |
229 | 246 | ||
247 | static const struct samsung_clk_reg_dump src_mask_suspend[] = { | ||
248 | { .offset = SRC_MASK_TOP, .value = 0x00000001, }, | ||
249 | { .offset = SRC_MASK_CAM, .value = 0x11111111, }, | ||
250 | { .offset = SRC_MASK_TV, .value = 0x00000111, }, | ||
251 | { .offset = SRC_MASK_LCD0, .value = 0x00001111, }, | ||
252 | { .offset = SRC_MASK_MAUDIO, .value = 0x00000001, }, | ||
253 | { .offset = SRC_MASK_FSYS, .value = 0x01011111, }, | ||
254 | { .offset = SRC_MASK_PERIL0, .value = 0x01111111, }, | ||
255 | { .offset = SRC_MASK_PERIL1, .value = 0x01110111, }, | ||
256 | { .offset = SRC_MASK_DMC, .value = 0x00010000, }, | ||
257 | }; | ||
258 | |||
259 | static const struct samsung_clk_reg_dump src_mask_suspend_e4210[] = { | ||
260 | { .offset = E4210_SRC_MASK_LCD1, .value = 0x00001111, }, | ||
261 | }; | ||
262 | |||
263 | #define PLL_ENABLED (1 << 31) | ||
264 | #define PLL_LOCKED (1 << 29) | ||
265 | |||
266 | static void exynos4_clk_wait_for_pll(u32 reg) | ||
267 | { | ||
268 | u32 pll_con; | ||
269 | |||
270 | pll_con = readl(reg_base + reg); | ||
271 | if (!(pll_con & PLL_ENABLED)) | ||
272 | return; | ||
273 | |||
274 | while (!(pll_con & PLL_LOCKED)) { | ||
275 | cpu_relax(); | ||
276 | pll_con = readl(reg_base + reg); | ||
277 | } | ||
278 | } | ||
279 | |||
280 | static int exynos4_clk_suspend(void) | ||
281 | { | ||
282 | samsung_clk_save(reg_base, exynos4_save_common, | ||
283 | ARRAY_SIZE(exynos4_clk_regs)); | ||
284 | samsung_clk_save(reg_base, exynos4_save_pll, | ||
285 | ARRAY_SIZE(exynos4_clk_pll_regs)); | ||
286 | |||
287 | if (exynos4_soc == EXYNOS4210) { | ||
288 | samsung_clk_save(reg_base, exynos4_save_soc, | ||
289 | ARRAY_SIZE(exynos4210_clk_save)); | ||
290 | samsung_clk_restore(reg_base, src_mask_suspend_e4210, | ||
291 | ARRAY_SIZE(src_mask_suspend_e4210)); | ||
292 | } else { | ||
293 | samsung_clk_save(reg_base, exynos4_save_soc, | ||
294 | ARRAY_SIZE(exynos4x12_clk_save)); | ||
295 | } | ||
296 | |||
297 | samsung_clk_restore(reg_base, src_mask_suspend, | ||
298 | ARRAY_SIZE(src_mask_suspend)); | ||
299 | |||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | static void exynos4_clk_resume(void) | ||
304 | { | ||
305 | samsung_clk_restore(reg_base, exynos4_save_pll, | ||
306 | ARRAY_SIZE(exynos4_clk_pll_regs)); | ||
307 | |||
308 | exynos4_clk_wait_for_pll(EPLL_CON0); | ||
309 | exynos4_clk_wait_for_pll(VPLL_CON0); | ||
310 | |||
311 | samsung_clk_restore(reg_base, exynos4_save_common, | ||
312 | ARRAY_SIZE(exynos4_clk_regs)); | ||
313 | |||
314 | if (exynos4_soc == EXYNOS4210) | ||
315 | samsung_clk_restore(reg_base, exynos4_save_soc, | ||
316 | ARRAY_SIZE(exynos4210_clk_save)); | ||
317 | else | ||
318 | samsung_clk_restore(reg_base, exynos4_save_soc, | ||
319 | ARRAY_SIZE(exynos4x12_clk_save)); | ||
320 | } | ||
321 | |||
322 | static struct syscore_ops exynos4_clk_syscore_ops = { | ||
323 | .suspend = exynos4_clk_suspend, | ||
324 | .resume = exynos4_clk_resume, | ||
325 | }; | ||
326 | |||
327 | static void exynos4_clk_sleep_init(void) | ||
328 | { | ||
329 | exynos4_save_common = samsung_clk_alloc_reg_dump(exynos4_clk_regs, | ||
330 | ARRAY_SIZE(exynos4_clk_regs)); | ||
331 | if (!exynos4_save_common) | ||
332 | goto err_warn; | ||
333 | |||
334 | if (exynos4_soc == EXYNOS4210) | ||
335 | exynos4_save_soc = samsung_clk_alloc_reg_dump( | ||
336 | exynos4210_clk_save, | ||
337 | ARRAY_SIZE(exynos4210_clk_save)); | ||
338 | else | ||
339 | exynos4_save_soc = samsung_clk_alloc_reg_dump( | ||
340 | exynos4x12_clk_save, | ||
341 | ARRAY_SIZE(exynos4x12_clk_save)); | ||
342 | if (!exynos4_save_soc) | ||
343 | goto err_common; | ||
344 | |||
345 | exynos4_save_pll = samsung_clk_alloc_reg_dump(exynos4_clk_pll_regs, | ||
346 | ARRAY_SIZE(exynos4_clk_pll_regs)); | ||
347 | if (!exynos4_save_pll) | ||
348 | goto err_soc; | ||
349 | |||
350 | register_syscore_ops(&exynos4_clk_syscore_ops); | ||
351 | return; | ||
352 | |||
353 | err_soc: | ||
354 | kfree(exynos4_save_soc); | ||
355 | err_common: | ||
356 | kfree(exynos4_save_common); | ||
357 | err_warn: | ||
358 | pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", | ||
359 | __func__); | ||
360 | } | ||
361 | #else | ||
362 | static void exynos4_clk_sleep_init(void) {} | ||
363 | #endif | ||
364 | |||
230 | /* list of all parent clocks */ | 365 | /* list of all parent clocks */ |
231 | PNAME(mout_apll_p) = { "fin_pll", "fout_apll", }; | 366 | PNAME(mout_apll_p) = { "fin_pll", "fout_apll", }; |
232 | PNAME(mout_mpll_p) = { "fin_pll", "fout_mpll", }; | 367 | PNAME(mout_mpll_p) = { "fin_pll", "fout_mpll", }; |
@@ -908,12 +1043,13 @@ static unsigned long exynos4_get_xom(void) | |||
908 | return xom; | 1043 | return xom; |
909 | } | 1044 | } |
910 | 1045 | ||
911 | static void __init exynos4_clk_register_finpll(unsigned long xom) | 1046 | static void __init exynos4_clk_register_finpll(void) |
912 | { | 1047 | { |
913 | struct samsung_fixed_rate_clock fclk; | 1048 | struct samsung_fixed_rate_clock fclk; |
914 | struct clk *clk; | 1049 | struct clk *clk; |
915 | unsigned long finpll_f = 24000000; | 1050 | unsigned long finpll_f = 24000000; |
916 | char *parent_name; | 1051 | char *parent_name; |
1052 | unsigned int xom = exynos4_get_xom(); | ||
917 | 1053 | ||
918 | parent_name = xom & 1 ? "xusbxti" : "xxti"; | 1054 | parent_name = xom & 1 ? "xusbxti" : "xxti"; |
919 | clk = clk_get(NULL, parent_name); | 1055 | clk = clk_get(NULL, parent_name); |
@@ -1038,27 +1174,21 @@ static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = { | |||
1038 | 1174 | ||
1039 | /* register exynos4 clocks */ | 1175 | /* register exynos4 clocks */ |
1040 | static void __init exynos4_clk_init(struct device_node *np, | 1176 | static void __init exynos4_clk_init(struct device_node *np, |
1041 | enum exynos4_soc exynos4_soc, | 1177 | enum exynos4_soc soc) |
1042 | void __iomem *reg_base, unsigned long xom) | ||
1043 | { | 1178 | { |
1179 | exynos4_soc = soc; | ||
1180 | |||
1044 | reg_base = of_iomap(np, 0); | 1181 | reg_base = of_iomap(np, 0); |
1045 | if (!reg_base) | 1182 | if (!reg_base) |
1046 | panic("%s: failed to map registers\n", __func__); | 1183 | panic("%s: failed to map registers\n", __func__); |
1047 | 1184 | ||
1048 | if (exynos4_soc == EXYNOS4210) | 1185 | samsung_clk_init(np, reg_base, CLK_NR_CLKS); |
1049 | samsung_clk_init(np, reg_base, CLK_NR_CLKS, | ||
1050 | exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs), | ||
1051 | exynos4210_clk_save, ARRAY_SIZE(exynos4210_clk_save)); | ||
1052 | else | ||
1053 | samsung_clk_init(np, reg_base, CLK_NR_CLKS, | ||
1054 | exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs), | ||
1055 | exynos4x12_clk_save, ARRAY_SIZE(exynos4x12_clk_save)); | ||
1056 | 1186 | ||
1057 | samsung_clk_of_register_fixed_ext(exynos4_fixed_rate_ext_clks, | 1187 | samsung_clk_of_register_fixed_ext(exynos4_fixed_rate_ext_clks, |
1058 | ARRAY_SIZE(exynos4_fixed_rate_ext_clks), | 1188 | ARRAY_SIZE(exynos4_fixed_rate_ext_clks), |
1059 | ext_clk_match); | 1189 | ext_clk_match); |
1060 | 1190 | ||
1061 | exynos4_clk_register_finpll(xom); | 1191 | exynos4_clk_register_finpll(); |
1062 | 1192 | ||
1063 | if (exynos4_soc == EXYNOS4210) { | 1193 | if (exynos4_soc == EXYNOS4210) { |
1064 | samsung_clk_register_mux(exynos4210_mux_early, | 1194 | samsung_clk_register_mux(exynos4210_mux_early, |
@@ -1125,6 +1255,8 @@ static void __init exynos4_clk_init(struct device_node *np, | |||
1125 | samsung_clk_register_alias(exynos4_aliases, | 1255 | samsung_clk_register_alias(exynos4_aliases, |
1126 | ARRAY_SIZE(exynos4_aliases)); | 1256 | ARRAY_SIZE(exynos4_aliases)); |
1127 | 1257 | ||
1258 | exynos4_clk_sleep_init(); | ||
1259 | |||
1128 | pr_info("%s clocks: sclk_apll = %ld, sclk_mpll = %ld\n" | 1260 | pr_info("%s clocks: sclk_apll = %ld, sclk_mpll = %ld\n" |
1129 | "\tsclk_epll = %ld, sclk_vpll = %ld, arm_clk = %ld\n", | 1261 | "\tsclk_epll = %ld, sclk_vpll = %ld, arm_clk = %ld\n", |
1130 | exynos4_soc == EXYNOS4210 ? "Exynos4210" : "Exynos4x12", | 1262 | exynos4_soc == EXYNOS4210 ? "Exynos4210" : "Exynos4x12", |
@@ -1136,12 +1268,12 @@ static void __init exynos4_clk_init(struct device_node *np, | |||
1136 | 1268 | ||
1137 | static void __init exynos4210_clk_init(struct device_node *np) | 1269 | static void __init exynos4210_clk_init(struct device_node *np) |
1138 | { | 1270 | { |
1139 | exynos4_clk_init(np, EXYNOS4210, NULL, exynos4_get_xom()); | 1271 | exynos4_clk_init(np, EXYNOS4210); |
1140 | } | 1272 | } |
1141 | CLK_OF_DECLARE(exynos4210_clk, "samsung,exynos4210-clock", exynos4210_clk_init); | 1273 | CLK_OF_DECLARE(exynos4210_clk, "samsung,exynos4210-clock", exynos4210_clk_init); |
1142 | 1274 | ||
1143 | static void __init exynos4412_clk_init(struct device_node *np) | 1275 | static void __init exynos4412_clk_init(struct device_node *np) |
1144 | { | 1276 | { |
1145 | exynos4_clk_init(np, EXYNOS4X12, NULL, exynos4_get_xom()); | 1277 | exynos4_clk_init(np, EXYNOS4X12); |
1146 | } | 1278 | } |
1147 | CLK_OF_DECLARE(exynos4412_clk, "samsung,exynos4412-clock", exynos4412_clk_init); | 1279 | CLK_OF_DECLARE(exynos4412_clk, "samsung,exynos4412-clock", exynos4412_clk_init); |
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c index ff4beebe1f0b..e7ee4420da81 100644 --- a/drivers/clk/samsung/clk-exynos5250.c +++ b/drivers/clk/samsung/clk-exynos5250.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/clk-provider.h> | 16 | #include <linux/clk-provider.h> |
17 | #include <linux/of.h> | 17 | #include <linux/of.h> |
18 | #include <linux/of_address.h> | 18 | #include <linux/of_address.h> |
19 | #include <linux/syscore_ops.h> | ||
19 | 20 | ||
20 | #include "clk.h" | 21 | #include "clk.h" |
21 | 22 | ||
@@ -85,6 +86,11 @@ enum exynos5250_plls { | |||
85 | nr_plls /* number of PLLs */ | 86 | nr_plls /* number of PLLs */ |
86 | }; | 87 | }; |
87 | 88 | ||
89 | static void __iomem *reg_base; | ||
90 | |||
91 | #ifdef CONFIG_PM_SLEEP | ||
92 | static struct samsung_clk_reg_dump *exynos5250_save; | ||
93 | |||
88 | /* | 94 | /* |
89 | * list of controller registers to be saved and restored during a | 95 | * list of controller registers to be saved and restored during a |
90 | * suspend/resume cycle. | 96 | * suspend/resume cycle. |
@@ -137,6 +143,41 @@ static unsigned long exynos5250_clk_regs[] __initdata = { | |||
137 | GATE_IP_ACP, | 143 | GATE_IP_ACP, |
138 | }; | 144 | }; |
139 | 145 | ||
146 | static int exynos5250_clk_suspend(void) | ||
147 | { | ||
148 | samsung_clk_save(reg_base, exynos5250_save, | ||
149 | ARRAY_SIZE(exynos5250_clk_regs)); | ||
150 | |||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | static void exynos5250_clk_resume(void) | ||
155 | { | ||
156 | samsung_clk_restore(reg_base, exynos5250_save, | ||
157 | ARRAY_SIZE(exynos5250_clk_regs)); | ||
158 | } | ||
159 | |||
160 | static struct syscore_ops exynos5250_clk_syscore_ops = { | ||
161 | .suspend = exynos5250_clk_suspend, | ||
162 | .resume = exynos5250_clk_resume, | ||
163 | }; | ||
164 | |||
165 | static void exynos5250_clk_sleep_init(void) | ||
166 | { | ||
167 | exynos5250_save = samsung_clk_alloc_reg_dump(exynos5250_clk_regs, | ||
168 | ARRAY_SIZE(exynos5250_clk_regs)); | ||
169 | if (!exynos5250_save) { | ||
170 | pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", | ||
171 | __func__); | ||
172 | return; | ||
173 | } | ||
174 | |||
175 | register_syscore_ops(&exynos5250_clk_syscore_ops); | ||
176 | } | ||
177 | #else | ||
178 | static void exynos5250_clk_sleep_init(void) {} | ||
179 | #endif | ||
180 | |||
140 | /* list of all parent clocks */ | 181 | /* list of all parent clocks */ |
141 | PNAME(mout_apll_p) = { "fin_pll", "fout_apll", }; | 182 | PNAME(mout_apll_p) = { "fin_pll", "fout_apll", }; |
142 | PNAME(mout_cpu_p) = { "mout_apll", "mout_mpll", }; | 183 | PNAME(mout_cpu_p) = { "mout_apll", "mout_mpll", }; |
@@ -645,8 +686,6 @@ static struct of_device_id ext_clk_match[] __initdata = { | |||
645 | /* register exynos5250 clocks */ | 686 | /* register exynos5250 clocks */ |
646 | static void __init exynos5250_clk_init(struct device_node *np) | 687 | static void __init exynos5250_clk_init(struct device_node *np) |
647 | { | 688 | { |
648 | void __iomem *reg_base; | ||
649 | |||
650 | if (np) { | 689 | if (np) { |
651 | reg_base = of_iomap(np, 0); | 690 | reg_base = of_iomap(np, 0); |
652 | if (!reg_base) | 691 | if (!reg_base) |
@@ -655,9 +694,7 @@ static void __init exynos5250_clk_init(struct device_node *np) | |||
655 | panic("%s: unable to determine soc\n", __func__); | 694 | panic("%s: unable to determine soc\n", __func__); |
656 | } | 695 | } |
657 | 696 | ||
658 | samsung_clk_init(np, reg_base, CLK_NR_CLKS, | 697 | samsung_clk_init(np, reg_base, CLK_NR_CLKS); |
659 | exynos5250_clk_regs, ARRAY_SIZE(exynos5250_clk_regs), | ||
660 | NULL, 0); | ||
661 | samsung_clk_of_register_fixed_ext(exynos5250_fixed_rate_ext_clks, | 698 | samsung_clk_of_register_fixed_ext(exynos5250_fixed_rate_ext_clks, |
662 | ARRAY_SIZE(exynos5250_fixed_rate_ext_clks), | 699 | ARRAY_SIZE(exynos5250_fixed_rate_ext_clks), |
663 | ext_clk_match); | 700 | ext_clk_match); |
@@ -685,6 +722,8 @@ static void __init exynos5250_clk_init(struct device_node *np) | |||
685 | samsung_clk_register_gate(exynos5250_gate_clks, | 722 | samsung_clk_register_gate(exynos5250_gate_clks, |
686 | ARRAY_SIZE(exynos5250_gate_clks)); | 723 | ARRAY_SIZE(exynos5250_gate_clks)); |
687 | 724 | ||
725 | exynos5250_clk_sleep_init(); | ||
726 | |||
688 | pr_info("Exynos5250: clock setup completed, armclk=%ld\n", | 727 | pr_info("Exynos5250: clock setup completed, armclk=%ld\n", |
689 | _get_rate("div_arm2")); | 728 | _get_rate("div_arm2")); |
690 | } | 729 | } |
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index ab4f2f7d88ef..60b26819bed5 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/clk-provider.h> | 16 | #include <linux/clk-provider.h> |
17 | #include <linux/of.h> | 17 | #include <linux/of.h> |
18 | #include <linux/of_address.h> | 18 | #include <linux/of_address.h> |
19 | #include <linux/syscore_ops.h> | ||
19 | 20 | ||
20 | #include "clk.h" | 21 | #include "clk.h" |
21 | 22 | ||
@@ -108,6 +109,11 @@ enum exynos5420_plls { | |||
108 | nr_plls /* number of PLLs */ | 109 | nr_plls /* number of PLLs */ |
109 | }; | 110 | }; |
110 | 111 | ||
112 | static void __iomem *reg_base; | ||
113 | |||
114 | #ifdef CONFIG_PM_SLEEP | ||
115 | static struct samsung_clk_reg_dump *exynos5420_save; | ||
116 | |||
111 | /* | 117 | /* |
112 | * list of controller registers to be saved and restored during a | 118 | * list of controller registers to be saved and restored during a |
113 | * suspend/resume cycle. | 119 | * suspend/resume cycle. |
@@ -174,6 +180,41 @@ static unsigned long exynos5420_clk_regs[] __initdata = { | |||
174 | DIV_KFC0, | 180 | DIV_KFC0, |
175 | }; | 181 | }; |
176 | 182 | ||
183 | static int exynos5420_clk_suspend(void) | ||
184 | { | ||
185 | samsung_clk_save(reg_base, exynos5420_save, | ||
186 | ARRAY_SIZE(exynos5420_clk_regs)); | ||
187 | |||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | static void exynos5420_clk_resume(void) | ||
192 | { | ||
193 | samsung_clk_restore(reg_base, exynos5420_save, | ||
194 | ARRAY_SIZE(exynos5420_clk_regs)); | ||
195 | } | ||
196 | |||
197 | static struct syscore_ops exynos5420_clk_syscore_ops = { | ||
198 | .suspend = exynos5420_clk_suspend, | ||
199 | .resume = exynos5420_clk_resume, | ||
200 | }; | ||
201 | |||
202 | static void exynos5420_clk_sleep_init(void) | ||
203 | { | ||
204 | exynos5420_save = samsung_clk_alloc_reg_dump(exynos5420_clk_regs, | ||
205 | ARRAY_SIZE(exynos5420_clk_regs)); | ||
206 | if (!exynos5420_save) { | ||
207 | pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", | ||
208 | __func__); | ||
209 | return; | ||
210 | } | ||
211 | |||
212 | register_syscore_ops(&exynos5420_clk_syscore_ops); | ||
213 | } | ||
214 | #else | ||
215 | static void exynos5420_clk_sleep_init(void) {} | ||
216 | #endif | ||
217 | |||
177 | /* list of all parent clocks */ | 218 | /* list of all parent clocks */ |
178 | PNAME(mspll_cpu_p) = { "sclk_cpll", "sclk_dpll", | 219 | PNAME(mspll_cpu_p) = { "sclk_cpll", "sclk_dpll", |
179 | "sclk_mpll", "sclk_spll" }; | 220 | "sclk_mpll", "sclk_spll" }; |
@@ -737,8 +778,6 @@ static struct of_device_id ext_clk_match[] __initdata = { | |||
737 | /* register exynos5420 clocks */ | 778 | /* register exynos5420 clocks */ |
738 | static void __init exynos5420_clk_init(struct device_node *np) | 779 | static void __init exynos5420_clk_init(struct device_node *np) |
739 | { | 780 | { |
740 | void __iomem *reg_base; | ||
741 | |||
742 | if (np) { | 781 | if (np) { |
743 | reg_base = of_iomap(np, 0); | 782 | reg_base = of_iomap(np, 0); |
744 | if (!reg_base) | 783 | if (!reg_base) |
@@ -747,9 +786,7 @@ static void __init exynos5420_clk_init(struct device_node *np) | |||
747 | panic("%s: unable to determine soc\n", __func__); | 786 | panic("%s: unable to determine soc\n", __func__); |
748 | } | 787 | } |
749 | 788 | ||
750 | samsung_clk_init(np, reg_base, CLK_NR_CLKS, | 789 | samsung_clk_init(np, reg_base, CLK_NR_CLKS); |
751 | exynos5420_clk_regs, ARRAY_SIZE(exynos5420_clk_regs), | ||
752 | NULL, 0); | ||
753 | samsung_clk_of_register_fixed_ext(exynos5420_fixed_rate_ext_clks, | 790 | samsung_clk_of_register_fixed_ext(exynos5420_fixed_rate_ext_clks, |
754 | ARRAY_SIZE(exynos5420_fixed_rate_ext_clks), | 791 | ARRAY_SIZE(exynos5420_fixed_rate_ext_clks), |
755 | ext_clk_match); | 792 | ext_clk_match); |
@@ -765,5 +802,7 @@ static void __init exynos5420_clk_init(struct device_node *np) | |||
765 | ARRAY_SIZE(exynos5420_div_clks)); | 802 | ARRAY_SIZE(exynos5420_div_clks)); |
766 | samsung_clk_register_gate(exynos5420_gate_clks, | 803 | samsung_clk_register_gate(exynos5420_gate_clks, |
767 | ARRAY_SIZE(exynos5420_gate_clks)); | 804 | ARRAY_SIZE(exynos5420_gate_clks)); |
805 | |||
806 | exynos5420_clk_sleep_init(); | ||
768 | } | 807 | } |
769 | CLK_OF_DECLARE(exynos5420_clk, "samsung,exynos5420-clock", exynos5420_clk_init); | 808 | CLK_OF_DECLARE(exynos5420_clk, "samsung,exynos5420-clock", exynos5420_clk_init); |
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c index cbc15b56891d..2bfad5a993d0 100644 --- a/drivers/clk/samsung/clk-exynos5440.c +++ b/drivers/clk/samsung/clk-exynos5440.c | |||
@@ -101,7 +101,7 @@ static void __init exynos5440_clk_init(struct device_node *np) | |||
101 | return; | 101 | return; |
102 | } | 102 | } |
103 | 103 | ||
104 | samsung_clk_init(np, reg_base, CLK_NR_CLKS, NULL, 0, NULL, 0); | 104 | samsung_clk_init(np, reg_base, CLK_NR_CLKS); |
105 | samsung_clk_of_register_fixed_ext(exynos5440_fixed_rate_ext_clks, | 105 | samsung_clk_of_register_fixed_ext(exynos5440_fixed_rate_ext_clks, |
106 | ARRAY_SIZE(exynos5440_fixed_rate_ext_clks), ext_clk_match); | 106 | ARRAY_SIZE(exynos5440_fixed_rate_ext_clks), ext_clk_match); |
107 | 107 | ||
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c index 8e27aee6887e..8bda658137a8 100644 --- a/drivers/clk/samsung/clk-s3c64xx.c +++ b/drivers/clk/samsung/clk-s3c64xx.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/clk-provider.h> | 13 | #include <linux/clk-provider.h> |
14 | #include <linux/of.h> | 14 | #include <linux/of.h> |
15 | #include <linux/of_address.h> | 15 | #include <linux/of_address.h> |
16 | #include <linux/syscore_ops.h> | ||
16 | 17 | ||
17 | #include <dt-bindings/clock/samsung,s3c64xx-clock.h> | 18 | #include <dt-bindings/clock/samsung,s3c64xx-clock.h> |
18 | 19 | ||
@@ -61,6 +62,13 @@ enum s3c64xx_plls { | |||
61 | apll, mpll, epll, | 62 | apll, mpll, epll, |
62 | }; | 63 | }; |
63 | 64 | ||
65 | static void __iomem *reg_base; | ||
66 | static bool is_s3c6400; | ||
67 | |||
68 | #ifdef CONFIG_PM_SLEEP | ||
69 | static struct samsung_clk_reg_dump *s3c64xx_save_common; | ||
70 | static struct samsung_clk_reg_dump *s3c64xx_save_soc; | ||
71 | |||
64 | /* | 72 | /* |
65 | * List of controller registers to be saved and restored during | 73 | * List of controller registers to be saved and restored during |
66 | * a suspend/resume cycle. | 74 | * a suspend/resume cycle. |
@@ -87,6 +95,60 @@ static unsigned long s3c6410_clk_regs[] __initdata = { | |||
87 | MEM0_GATE, | 95 | MEM0_GATE, |
88 | }; | 96 | }; |
89 | 97 | ||
98 | static int s3c64xx_clk_suspend(void) | ||
99 | { | ||
100 | samsung_clk_save(reg_base, s3c64xx_save_common, | ||
101 | ARRAY_SIZE(s3c64xx_clk_regs)); | ||
102 | |||
103 | if (!is_s3c6400) | ||
104 | samsung_clk_save(reg_base, s3c64xx_save_soc, | ||
105 | ARRAY_SIZE(s3c6410_clk_regs)); | ||
106 | |||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static void s3c64xx_clk_resume(void) | ||
111 | { | ||
112 | samsung_clk_restore(reg_base, s3c64xx_save_common, | ||
113 | ARRAY_SIZE(s3c64xx_clk_regs)); | ||
114 | |||
115 | if (!is_s3c6400) | ||
116 | samsung_clk_restore(reg_base, s3c64xx_save_soc, | ||
117 | ARRAY_SIZE(s3c6410_clk_regs)); | ||
118 | } | ||
119 | |||
120 | static struct syscore_ops s3c64xx_clk_syscore_ops = { | ||
121 | .suspend = s3c64xx_clk_suspend, | ||
122 | .resume = s3c64xx_clk_resume, | ||
123 | }; | ||
124 | |||
125 | static void s3c64xx_clk_sleep_init(void) | ||
126 | { | ||
127 | s3c64xx_save_common = samsung_clk_alloc_reg_dump(s3c64xx_clk_regs, | ||
128 | ARRAY_SIZE(s3c64xx_clk_regs)); | ||
129 | if (!s3c64xx_save_common) | ||
130 | goto err_warn; | ||
131 | |||
132 | if (!is_s3c6400) { | ||
133 | s3c64xx_save_soc = samsung_clk_alloc_reg_dump(s3c6410_clk_regs, | ||
134 | ARRAY_SIZE(s3c6410_clk_regs)); | ||
135 | if (!s3c64xx_save_soc) | ||
136 | goto err_soc; | ||
137 | } | ||
138 | |||
139 | register_syscore_ops(&s3c64xx_clk_syscore_ops); | ||
140 | return; | ||
141 | |||
142 | err_soc: | ||
143 | kfree(s3c64xx_save_common); | ||
144 | err_warn: | ||
145 | pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", | ||
146 | __func__); | ||
147 | } | ||
148 | #else | ||
149 | static void s3c64xx_clk_sleep_init(void) {} | ||
150 | #endif | ||
151 | |||
90 | /* List of parent clocks common for all S3C64xx SoCs. */ | 152 | /* List of parent clocks common for all S3C64xx SoCs. */ |
91 | PNAME(spi_mmc_p) = { "mout_epll", "dout_mpll", "fin_pll", "clk27m" }; | 153 | PNAME(spi_mmc_p) = { "mout_epll", "dout_mpll", "fin_pll", "clk27m" }; |
92 | PNAME(uart_p) = { "mout_epll", "dout_mpll" }; | 154 | PNAME(uart_p) = { "mout_epll", "dout_mpll" }; |
@@ -391,11 +453,11 @@ static void __init s3c64xx_clk_register_fixed_ext(unsigned long fin_pll_f, | |||
391 | 453 | ||
392 | /* Register s3c64xx clocks. */ | 454 | /* Register s3c64xx clocks. */ |
393 | void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f, | 455 | void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f, |
394 | unsigned long xusbxti_f, bool is_s3c6400, | 456 | unsigned long xusbxti_f, bool s3c6400, |
395 | void __iomem *reg_base) | 457 | void __iomem *base) |
396 | { | 458 | { |
397 | unsigned long *soc_regs = NULL; | 459 | reg_base = base; |
398 | unsigned long nr_soc_regs = 0; | 460 | is_s3c6400 = s3c6400; |
399 | 461 | ||
400 | if (np) { | 462 | if (np) { |
401 | reg_base = of_iomap(np, 0); | 463 | reg_base = of_iomap(np, 0); |
@@ -403,13 +465,7 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f, | |||
403 | panic("%s: failed to map registers\n", __func__); | 465 | panic("%s: failed to map registers\n", __func__); |
404 | } | 466 | } |
405 | 467 | ||
406 | if (!is_s3c6400) { | 468 | samsung_clk_init(np, reg_base, NR_CLKS); |
407 | soc_regs = s3c6410_clk_regs; | ||
408 | nr_soc_regs = ARRAY_SIZE(s3c6410_clk_regs); | ||
409 | } | ||
410 | |||
411 | samsung_clk_init(np, reg_base, NR_CLKS, s3c64xx_clk_regs, | ||
412 | ARRAY_SIZE(s3c64xx_clk_regs), soc_regs, nr_soc_regs); | ||
413 | 469 | ||
414 | /* Register external clocks. */ | 470 | /* Register external clocks. */ |
415 | if (!np) | 471 | if (!np) |
@@ -452,6 +508,7 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f, | |||
452 | 508 | ||
453 | samsung_clk_register_alias(s3c64xx_clock_aliases, | 509 | samsung_clk_register_alias(s3c64xx_clock_aliases, |
454 | ARRAY_SIZE(s3c64xx_clock_aliases)); | 510 | ARRAY_SIZE(s3c64xx_clock_aliases)); |
511 | s3c64xx_clk_sleep_init(); | ||
455 | 512 | ||
456 | pr_info("%s clocks: apll = %lu, mpll = %lu\n" | 513 | pr_info("%s clocks: apll = %lu, mpll = %lu\n" |
457 | "\tepll = %lu, arm_clk = %lu\n", | 514 | "\tepll = %lu, arm_clk = %lu\n", |
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c index f503f32e2f80..91bec3ebdc8f 100644 --- a/drivers/clk/samsung/clk.c +++ b/drivers/clk/samsung/clk.c | |||
@@ -21,64 +21,45 @@ static void __iomem *reg_base; | |||
21 | static struct clk_onecell_data clk_data; | 21 | static struct clk_onecell_data clk_data; |
22 | #endif | 22 | #endif |
23 | 23 | ||
24 | #ifdef CONFIG_PM_SLEEP | 24 | void samsung_clk_save(void __iomem *base, |
25 | static struct samsung_clk_reg_dump *reg_dump; | 25 | struct samsung_clk_reg_dump *rd, |
26 | static unsigned long nr_reg_dump; | 26 | unsigned int num_regs) |
27 | |||
28 | static int samsung_clk_suspend(void) | ||
29 | { | 27 | { |
30 | struct samsung_clk_reg_dump *rd = reg_dump; | 28 | for (; num_regs > 0; --num_regs, ++rd) |
31 | unsigned long i; | 29 | rd->value = readl(base + rd->offset); |
32 | 30 | } | |
33 | for (i = 0; i < nr_reg_dump; i++, rd++) | ||
34 | rd->value = __raw_readl(reg_base + rd->offset); | ||
35 | 31 | ||
36 | return 0; | 32 | void samsung_clk_restore(void __iomem *base, |
33 | const struct samsung_clk_reg_dump *rd, | ||
34 | unsigned int num_regs) | ||
35 | { | ||
36 | for (; num_regs > 0; --num_regs, ++rd) | ||
37 | writel(rd->value, base + rd->offset); | ||
37 | } | 38 | } |
38 | 39 | ||
39 | static void samsung_clk_resume(void) | 40 | struct samsung_clk_reg_dump *samsung_clk_alloc_reg_dump( |
41 | const unsigned long *rdump, | ||
42 | unsigned long nr_rdump) | ||
40 | { | 43 | { |
41 | struct samsung_clk_reg_dump *rd = reg_dump; | 44 | struct samsung_clk_reg_dump *rd; |
42 | unsigned long i; | 45 | unsigned int i; |
43 | 46 | ||
44 | for (i = 0; i < nr_reg_dump; i++, rd++) | 47 | rd = kcalloc(nr_rdump, sizeof(*rd), GFP_KERNEL); |
45 | __raw_writel(rd->value, reg_base + rd->offset); | 48 | if (!rd) |
46 | } | 49 | return NULL; |
50 | |||
51 | for (i = 0; i < nr_rdump; ++i) | ||
52 | rd[i].offset = rdump[i]; | ||
47 | 53 | ||
48 | static struct syscore_ops samsung_clk_syscore_ops = { | 54 | return rd; |
49 | .suspend = samsung_clk_suspend, | 55 | } |
50 | .resume = samsung_clk_resume, | ||
51 | }; | ||
52 | #endif /* CONFIG_PM_SLEEP */ | ||
53 | 56 | ||
54 | /* setup the essentials required to support clock lookup using ccf */ | 57 | /* setup the essentials required to support clock lookup using ccf */ |
55 | void __init samsung_clk_init(struct device_node *np, void __iomem *base, | 58 | void __init samsung_clk_init(struct device_node *np, void __iomem *base, |
56 | unsigned long nr_clks, unsigned long *rdump, | 59 | unsigned long nr_clks) |
57 | unsigned long nr_rdump, unsigned long *soc_rdump, | ||
58 | unsigned long nr_soc_rdump) | ||
59 | { | 60 | { |
60 | reg_base = base; | 61 | reg_base = base; |
61 | 62 | ||
62 | #ifdef CONFIG_PM_SLEEP | ||
63 | if (rdump && nr_rdump) { | ||
64 | unsigned int idx; | ||
65 | reg_dump = kzalloc(sizeof(struct samsung_clk_reg_dump) | ||
66 | * (nr_rdump + nr_soc_rdump), GFP_KERNEL); | ||
67 | if (!reg_dump) { | ||
68 | pr_err("%s: memory alloc for register dump failed\n", | ||
69 | __func__); | ||
70 | return; | ||
71 | } | ||
72 | |||
73 | for (idx = 0; idx < nr_rdump; idx++) | ||
74 | reg_dump[idx].offset = rdump[idx]; | ||
75 | for (idx = 0; idx < nr_soc_rdump; idx++) | ||
76 | reg_dump[nr_rdump + idx].offset = soc_rdump[idx]; | ||
77 | nr_reg_dump = nr_rdump + nr_soc_rdump; | ||
78 | register_syscore_ops(&samsung_clk_syscore_ops); | ||
79 | } | ||
80 | #endif | ||
81 | |||
82 | clk_table = kzalloc(sizeof(struct clk *) * nr_clks, GFP_KERNEL); | 63 | clk_table = kzalloc(sizeof(struct clk *) * nr_clks, GFP_KERNEL); |
83 | if (!clk_table) | 64 | if (!clk_table) |
84 | panic("could not allocate clock lookup table\n"); | 65 | panic("could not allocate clock lookup table\n"); |
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h index 31b4174e7a5b..c7141ba826e0 100644 --- a/drivers/clk/samsung/clk.h +++ b/drivers/clk/samsung/clk.h | |||
@@ -313,9 +313,7 @@ struct samsung_pll_clock { | |||
313 | _lock, _con, _rtable, _alias) | 313 | _lock, _con, _rtable, _alias) |
314 | 314 | ||
315 | extern void __init samsung_clk_init(struct device_node *np, void __iomem *base, | 315 | extern void __init samsung_clk_init(struct device_node *np, void __iomem *base, |
316 | unsigned long nr_clks, unsigned long *rdump, | 316 | unsigned long nr_clks); |
317 | unsigned long nr_rdump, unsigned long *soc_rdump, | ||
318 | unsigned long nr_soc_rdump); | ||
319 | extern void __init samsung_clk_of_register_fixed_ext( | 317 | extern void __init samsung_clk_of_register_fixed_ext( |
320 | struct samsung_fixed_rate_clock *fixed_rate_clk, | 318 | struct samsung_fixed_rate_clock *fixed_rate_clk, |
321 | unsigned int nr_fixed_rate_clk, | 319 | unsigned int nr_fixed_rate_clk, |
@@ -340,4 +338,14 @@ extern void __init samsung_clk_register_pll(struct samsung_pll_clock *pll_list, | |||
340 | 338 | ||
341 | extern unsigned long _get_rate(const char *clk_name); | 339 | extern unsigned long _get_rate(const char *clk_name); |
342 | 340 | ||
341 | extern void samsung_clk_save(void __iomem *base, | ||
342 | struct samsung_clk_reg_dump *rd, | ||
343 | unsigned int num_regs); | ||
344 | extern void samsung_clk_restore(void __iomem *base, | ||
345 | const struct samsung_clk_reg_dump *rd, | ||
346 | unsigned int num_regs); | ||
347 | extern struct samsung_clk_reg_dump *samsung_clk_alloc_reg_dump( | ||
348 | const unsigned long *rdump, | ||
349 | unsigned long nr_rdump); | ||
350 | |||
343 | #endif /* __SAMSUNG_CLK_H */ | 351 | #endif /* __SAMSUNG_CLK_H */ |
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c index 8cbfcf88fae3..a820b0cfcf57 100644 --- a/drivers/clk/versatile/clk-icst.c +++ b/drivers/clk/versatile/clk-icst.c | |||
@@ -33,7 +33,7 @@ struct clk_icst { | |||
33 | struct clk_hw hw; | 33 | struct clk_hw hw; |
34 | void __iomem *vcoreg; | 34 | void __iomem *vcoreg; |
35 | void __iomem *lockreg; | 35 | void __iomem *lockreg; |
36 | const struct icst_params *params; | 36 | struct icst_params *params; |
37 | unsigned long rate; | 37 | unsigned long rate; |
38 | }; | 38 | }; |
39 | 39 | ||
@@ -84,6 +84,8 @@ static unsigned long icst_recalc_rate(struct clk_hw *hw, | |||
84 | struct clk_icst *icst = to_icst(hw); | 84 | struct clk_icst *icst = to_icst(hw); |
85 | struct icst_vco vco; | 85 | struct icst_vco vco; |
86 | 86 | ||
87 | if (parent_rate) | ||
88 | icst->params->ref = parent_rate; | ||
87 | vco = vco_get(icst->vcoreg); | 89 | vco = vco_get(icst->vcoreg); |
88 | icst->rate = icst_hz(icst->params, vco); | 90 | icst->rate = icst_hz(icst->params, vco); |
89 | return icst->rate; | 91 | return icst->rate; |
@@ -105,6 +107,8 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate, | |||
105 | struct clk_icst *icst = to_icst(hw); | 107 | struct clk_icst *icst = to_icst(hw); |
106 | struct icst_vco vco; | 108 | struct icst_vco vco; |
107 | 109 | ||
110 | if (parent_rate) | ||
111 | icst->params->ref = parent_rate; | ||
108 | vco = icst_hz_to_vco(icst->params, rate); | 112 | vco = icst_hz_to_vco(icst->params, rate); |
109 | icst->rate = icst_hz(icst->params, vco); | 113 | icst->rate = icst_hz(icst->params, vco); |
110 | vco_set(icst->lockreg, icst->vcoreg, vco); | 114 | vco_set(icst->lockreg, icst->vcoreg, vco); |
@@ -120,24 +124,33 @@ static const struct clk_ops icst_ops = { | |||
120 | struct clk *icst_clk_register(struct device *dev, | 124 | struct clk *icst_clk_register(struct device *dev, |
121 | const struct clk_icst_desc *desc, | 125 | const struct clk_icst_desc *desc, |
122 | const char *name, | 126 | const char *name, |
127 | const char *parent_name, | ||
123 | void __iomem *base) | 128 | void __iomem *base) |
124 | { | 129 | { |
125 | struct clk *clk; | 130 | struct clk *clk; |
126 | struct clk_icst *icst; | 131 | struct clk_icst *icst; |
127 | struct clk_init_data init; | 132 | struct clk_init_data init; |
133 | struct icst_params *pclone; | ||
128 | 134 | ||
129 | icst = kzalloc(sizeof(struct clk_icst), GFP_KERNEL); | 135 | icst = kzalloc(sizeof(struct clk_icst), GFP_KERNEL); |
130 | if (!icst) { | 136 | if (!icst) { |
131 | pr_err("could not allocate ICST clock!\n"); | 137 | pr_err("could not allocate ICST clock!\n"); |
132 | return ERR_PTR(-ENOMEM); | 138 | return ERR_PTR(-ENOMEM); |
133 | } | 139 | } |
140 | |||
141 | pclone = kmemdup(desc->params, sizeof(*pclone), GFP_KERNEL); | ||
142 | if (!pclone) { | ||
143 | pr_err("could not clone ICST params\n"); | ||
144 | return ERR_PTR(-ENOMEM); | ||
145 | } | ||
146 | |||
134 | init.name = name; | 147 | init.name = name; |
135 | init.ops = &icst_ops; | 148 | init.ops = &icst_ops; |
136 | init.flags = CLK_IS_ROOT; | 149 | init.flags = CLK_IS_ROOT; |
137 | init.parent_names = NULL; | 150 | init.parent_names = (parent_name ? &parent_name : NULL); |
138 | init.num_parents = 0; | 151 | init.num_parents = (parent_name ? 1 : 0); |
139 | icst->hw.init = &init; | 152 | icst->hw.init = &init; |
140 | icst->params = desc->params; | 153 | icst->params = pclone; |
141 | icst->vcoreg = base + desc->vco_offset; | 154 | icst->vcoreg = base + desc->vco_offset; |
142 | icst->lockreg = base + desc->lock_offset; | 155 | icst->lockreg = base + desc->lock_offset; |
143 | 156 | ||
diff --git a/drivers/clk/versatile/clk-icst.h b/drivers/clk/versatile/clk-icst.h index be99dd0da785..04e6f0aef588 100644 --- a/drivers/clk/versatile/clk-icst.h +++ b/drivers/clk/versatile/clk-icst.h | |||
@@ -16,4 +16,5 @@ struct clk_icst_desc { | |||
16 | struct clk *icst_clk_register(struct device *dev, | 16 | struct clk *icst_clk_register(struct device *dev, |
17 | const struct clk_icst_desc *desc, | 17 | const struct clk_icst_desc *desc, |
18 | const char *name, | 18 | const char *name, |
19 | const char *parent_name, | ||
19 | void __iomem *base); | 20 | void __iomem *base); |
diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c index 844f8d711a12..6d8b8e1a080a 100644 --- a/drivers/clk/versatile/clk-impd1.c +++ b/drivers/clk/versatile/clk-impd1.c | |||
@@ -93,13 +93,15 @@ void integrator_impd1_clk_init(void __iomem *base, unsigned int id) | |||
93 | imc = &impd1_clks[id]; | 93 | imc = &impd1_clks[id]; |
94 | 94 | ||
95 | imc->vco1name = kasprintf(GFP_KERNEL, "lm%x-vco1", id); | 95 | imc->vco1name = kasprintf(GFP_KERNEL, "lm%x-vco1", id); |
96 | clk = icst_clk_register(NULL, &impd1_icst1_desc, imc->vco1name, base); | 96 | clk = icst_clk_register(NULL, &impd1_icst1_desc, imc->vco1name, NULL, |
97 | base); | ||
97 | imc->vco1clk = clk; | 98 | imc->vco1clk = clk; |
98 | imc->clks[0] = clkdev_alloc(clk, NULL, "lm%x:01000", id); | 99 | imc->clks[0] = clkdev_alloc(clk, NULL, "lm%x:01000", id); |
99 | 100 | ||
100 | /* VCO2 is also called "CLK2" */ | 101 | /* VCO2 is also called "CLK2" */ |
101 | imc->vco2name = kasprintf(GFP_KERNEL, "lm%x-vco2", id); | 102 | imc->vco2name = kasprintf(GFP_KERNEL, "lm%x-vco2", id); |
102 | clk = icst_clk_register(NULL, &impd1_icst2_desc, imc->vco2name, base); | 103 | clk = icst_clk_register(NULL, &impd1_icst2_desc, imc->vco2name, NULL, |
104 | base); | ||
103 | imc->vco2clk = clk; | 105 | imc->vco2clk = clk; |
104 | 106 | ||
105 | /* MMCI uses CLK2 right off */ | 107 | /* MMCI uses CLK2 right off */ |
diff --git a/drivers/clk/versatile/clk-integrator.c b/drivers/clk/versatile/clk-integrator.c index bda8967e09c2..734c4b8fe6ab 100644 --- a/drivers/clk/versatile/clk-integrator.c +++ b/drivers/clk/versatile/clk-integrator.c | |||
@@ -10,21 +10,17 @@ | |||
10 | #include <linux/clk.h> | 10 | #include <linux/clk.h> |
11 | #include <linux/clkdev.h> | 11 | #include <linux/clkdev.h> |
12 | #include <linux/err.h> | 12 | #include <linux/err.h> |
13 | #include <linux/platform_data/clk-integrator.h> | 13 | #include <linux/of.h> |
14 | 14 | #include <linux/of_address.h> | |
15 | #include <mach/hardware.h> | ||
16 | #include <mach/platform.h> | ||
17 | 15 | ||
18 | #include "clk-icst.h" | 16 | #include "clk-icst.h" |
19 | 17 | ||
20 | /* | 18 | #define INTEGRATOR_HDR_LOCK_OFFSET 0x14 |
21 | * Implementation of the ARM Integrator/AP and Integrator/CP clock tree. | ||
22 | * Inspired by portions of: | ||
23 | * plat-versatile/clock.c and plat-versatile/include/plat/clock.h | ||
24 | */ | ||
25 | 19 | ||
26 | static const struct icst_params cp_auxvco_params = { | 20 | /* Base offset for the core module */ |
27 | .ref = 24000000, | 21 | static void __iomem *cm_base; |
22 | |||
23 | static const struct icst_params cp_auxosc_params = { | ||
28 | .vco_max = ICST525_VCO_MAX_5V, | 24 | .vco_max = ICST525_VCO_MAX_5V, |
29 | .vco_min = ICST525_VCO_MIN, | 25 | .vco_min = ICST525_VCO_MIN, |
30 | .vd_min = 8, | 26 | .vd_min = 8, |
@@ -35,50 +31,39 @@ static const struct icst_params cp_auxvco_params = { | |||
35 | .idx2s = icst525_idx2s, | 31 | .idx2s = icst525_idx2s, |
36 | }; | 32 | }; |
37 | 33 | ||
38 | static const struct clk_icst_desc __initdata cp_icst_desc = { | 34 | static const struct clk_icst_desc __initdata cm_auxosc_desc = { |
39 | .params = &cp_auxvco_params, | 35 | .params = &cp_auxosc_params, |
40 | .vco_offset = 0x1c, | 36 | .vco_offset = 0x1c, |
41 | .lock_offset = INTEGRATOR_HDR_LOCK_OFFSET, | 37 | .lock_offset = INTEGRATOR_HDR_LOCK_OFFSET, |
42 | }; | 38 | }; |
43 | 39 | ||
44 | /* | 40 | static void __init of_integrator_cm_osc_setup(struct device_node *np) |
45 | * integrator_clk_init() - set up the integrator clock tree | ||
46 | * @is_cp: pass true if it's the Integrator/CP else AP is assumed | ||
47 | */ | ||
48 | void __init integrator_clk_init(bool is_cp) | ||
49 | { | 41 | { |
50 | struct clk *clk; | 42 | struct clk *clk = ERR_PTR(-EINVAL); |
51 | 43 | const char *clk_name = np->name; | |
52 | /* APB clock dummy */ | 44 | const struct clk_icst_desc *desc = &cm_auxosc_desc; |
53 | clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0); | 45 | const char *parent_name; |
54 | clk_register_clkdev(clk, "apb_pclk", NULL); | ||
55 | |||
56 | /* UART reference clock */ | ||
57 | clk = clk_register_fixed_rate(NULL, "uartclk", NULL, CLK_IS_ROOT, | ||
58 | 14745600); | ||
59 | clk_register_clkdev(clk, NULL, "uart0"); | ||
60 | clk_register_clkdev(clk, NULL, "uart1"); | ||
61 | if (is_cp) | ||
62 | clk_register_clkdev(clk, NULL, "mmci"); | ||
63 | |||
64 | /* 24 MHz clock */ | ||
65 | clk = clk_register_fixed_rate(NULL, "clk24mhz", NULL, CLK_IS_ROOT, | ||
66 | 24000000); | ||
67 | clk_register_clkdev(clk, NULL, "kmi0"); | ||
68 | clk_register_clkdev(clk, NULL, "kmi1"); | ||
69 | if (!is_cp) | ||
70 | clk_register_clkdev(clk, NULL, "ap_timer"); | ||
71 | 46 | ||
72 | if (!is_cp) | 47 | if (!cm_base) { |
73 | return; | 48 | /* Remap the core module base if not done yet */ |
49 | struct device_node *parent; | ||
74 | 50 | ||
75 | /* 1 MHz clock */ | 51 | parent = of_get_parent(np); |
76 | clk = clk_register_fixed_rate(NULL, "clk1mhz", NULL, CLK_IS_ROOT, | 52 | if (!np) { |
77 | 1000000); | 53 | pr_err("no parent on core module clock\n"); |
78 | clk_register_clkdev(clk, NULL, "sp804"); | 54 | return; |
55 | } | ||
56 | cm_base = of_iomap(parent, 0); | ||
57 | if (!cm_base) { | ||
58 | pr_err("could not remap core module base\n"); | ||
59 | return; | ||
60 | } | ||
61 | } | ||
79 | 62 | ||
80 | /* ICST VCO clock used on the Integrator/CP CLCD */ | 63 | parent_name = of_clk_get_parent_name(np, 0); |
81 | clk = icst_clk_register(NULL, &cp_icst_desc, "icst", | 64 | clk = icst_clk_register(NULL, desc, clk_name, parent_name, cm_base); |
82 | __io_address(INTEGRATOR_HDR_BASE)); | 65 | if (!IS_ERR(clk)) |
83 | clk_register_clkdev(clk, NULL, "clcd"); | 66 | of_clk_add_provider(np, of_clk_src_simple_get, clk); |
84 | } | 67 | } |
68 | CLK_OF_DECLARE(integrator_cm_auxosc_clk, | ||
69 | "arm,integrator-cm-auxosc", of_integrator_cm_osc_setup); | ||
diff --git a/drivers/clk/versatile/clk-realview.c b/drivers/clk/versatile/clk-realview.c index 747e7b31117c..c8b523117fb7 100644 --- a/drivers/clk/versatile/clk-realview.c +++ b/drivers/clk/versatile/clk-realview.c | |||
@@ -85,10 +85,10 @@ void __init realview_clk_init(void __iomem *sysbase, bool is_pb1176) | |||
85 | /* ICST VCO clock */ | 85 | /* ICST VCO clock */ |
86 | if (is_pb1176) | 86 | if (is_pb1176) |
87 | clk = icst_clk_register(NULL, &realview_osc0_desc, | 87 | clk = icst_clk_register(NULL, &realview_osc0_desc, |
88 | "osc0", sysbase); | 88 | "osc0", NULL, sysbase); |
89 | else | 89 | else |
90 | clk = icst_clk_register(NULL, &realview_osc4_desc, | 90 | clk = icst_clk_register(NULL, &realview_osc4_desc, |
91 | "osc4", sysbase); | 91 | "osc4", NULL, sysbase); |
92 | 92 | ||
93 | clk_register_clkdev(clk, NULL, "dev:clcd"); | 93 | clk_register_clkdev(clk, NULL, "dev:clcd"); |
94 | clk_register_clkdev(clk, NULL, "issp:clcd"); | 94 | clk_register_clkdev(clk, NULL, "issp:clcd"); |
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 9fb627046e17..1e2b9db563ec 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm | |||
@@ -122,7 +122,7 @@ config ARM_INTEGRATOR | |||
122 | If in doubt, say Y. | 122 | If in doubt, say Y. |
123 | 123 | ||
124 | config ARM_KIRKWOOD_CPUFREQ | 124 | config ARM_KIRKWOOD_CPUFREQ |
125 | def_bool ARCH_KIRKWOOD && OF | 125 | def_bool MACH_KIRKWOOD |
126 | help | 126 | help |
127 | This adds the CPUFreq driver for Marvell Kirkwood | 127 | This adds the CPUFreq driver for Marvell Kirkwood |
128 | SoCs. | 128 | SoCs. |
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index d988948a89a0..97ccc31dbdd8 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm | |||
@@ -22,7 +22,7 @@ config ARM_HIGHBANK_CPUIDLE | |||
22 | 22 | ||
23 | config ARM_KIRKWOOD_CPUIDLE | 23 | config ARM_KIRKWOOD_CPUIDLE |
24 | bool "CPU Idle Driver for Marvell Kirkwood SoCs" | 24 | bool "CPU Idle Driver for Marvell Kirkwood SoCs" |
25 | depends on ARCH_KIRKWOOD | 25 | depends on ARCH_KIRKWOOD || MACH_KIRKWOOD |
26 | help | 26 | help |
27 | This adds the CPU Idle driver for Marvell Kirkwood SoCs. | 27 | This adds the CPU Idle driver for Marvell Kirkwood SoCs. |
28 | 28 | ||
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 92d8e9a064b4..a86c49a605c6 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
@@ -210,7 +210,7 @@ config GPIO_MSM_V1 | |||
210 | 210 | ||
211 | config GPIO_MSM_V2 | 211 | config GPIO_MSM_V2 |
212 | tristate "Qualcomm MSM GPIO v2" | 212 | tristate "Qualcomm MSM GPIO v2" |
213 | depends on GPIOLIB && OF && ARCH_MSM | 213 | depends on GPIOLIB && OF && ARCH_QCOM |
214 | help | 214 | help |
215 | Say yes here to support the GPIO interface on ARM v7 based | 215 | Say yes here to support the GPIO interface on ARM v7 based |
216 | Qualcomm MSM chips. Most of the pins on the MSM can be | 216 | Qualcomm MSM chips. Most of the pins on the MSM can be |
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index c69d1e07a3a6..b6984971ce0c 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig | |||
@@ -3,7 +3,7 @@ config DRM_MSM | |||
3 | tristate "MSM DRM" | 3 | tristate "MSM DRM" |
4 | depends on DRM | 4 | depends on DRM |
5 | depends on MSM_IOMMU | 5 | depends on MSM_IOMMU |
6 | depends on (ARCH_MSM && ARCH_MSM8960) || (ARM && COMPILE_TEST) | 6 | depends on ARCH_MSM8960 || (ARM && COMPILE_TEST) |
7 | select DRM_KMS_HELPER | 7 | select DRM_KMS_HELPER |
8 | select SHMEM | 8 | select SHMEM |
9 | select TMPFS | 9 | select TMPFS |
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index ec42d2decb2f..d770f7406631 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig | |||
@@ -77,3 +77,11 @@ config VERSATILE_FPGA_IRQ_NR | |||
77 | config XTENSA_MX | 77 | config XTENSA_MX |
78 | bool | 78 | bool |
79 | select IRQ_DOMAIN | 79 | select IRQ_DOMAIN |
80 | |||
81 | config IRQ_CROSSBAR | ||
82 | bool | ||
83 | help | ||
84 | Support for a CROSSBAR ip that preceeds the main interrupt controller. | ||
85 | The primary irqchip invokes the crossbar's callback which inturn allocates | ||
86 | a free irq and configures the IP. Thus the peripheral interrupts are | ||
87 | routed to one of the free irqchip interrupt lines. | ||
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 6cee9efa26e7..f180f8d5fb7b 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile | |||
@@ -28,3 +28,4 @@ obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o | |||
28 | obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o | 28 | obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o |
29 | obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o | 29 | obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o |
30 | obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o | 30 | obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o |
31 | obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o | ||
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c new file mode 100644 index 000000000000..fc817d28d1fe --- /dev/null +++ b/drivers/irqchip/irq-crossbar.c | |||
@@ -0,0 +1,208 @@ | |||
1 | /* | ||
2 | * drivers/irqchip/irq-crossbar.c | ||
3 | * | ||
4 | * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com | ||
5 | * Author: Sricharan R <r.sricharan@ti.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | */ | ||
12 | #include <linux/err.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/of_address.h> | ||
15 | #include <linux/of_irq.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/irqchip/arm-gic.h> | ||
18 | |||
19 | #define IRQ_FREE -1 | ||
20 | #define GIC_IRQ_START 32 | ||
21 | |||
22 | /* | ||
23 | * @int_max: maximum number of supported interrupts | ||
24 | * @irq_map: array of interrupts to crossbar number mapping | ||
25 | * @crossbar_base: crossbar base address | ||
26 | * @register_offsets: offsets for each irq number | ||
27 | */ | ||
28 | struct crossbar_device { | ||
29 | uint int_max; | ||
30 | uint *irq_map; | ||
31 | void __iomem *crossbar_base; | ||
32 | int *register_offsets; | ||
33 | void (*write) (int, int); | ||
34 | }; | ||
35 | |||
36 | static struct crossbar_device *cb; | ||
37 | |||
38 | static inline void crossbar_writel(int irq_no, int cb_no) | ||
39 | { | ||
40 | writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]); | ||
41 | } | ||
42 | |||
43 | static inline void crossbar_writew(int irq_no, int cb_no) | ||
44 | { | ||
45 | writew(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]); | ||
46 | } | ||
47 | |||
48 | static inline void crossbar_writeb(int irq_no, int cb_no) | ||
49 | { | ||
50 | writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]); | ||
51 | } | ||
52 | |||
53 | static inline int allocate_free_irq(int cb_no) | ||
54 | { | ||
55 | int i; | ||
56 | |||
57 | for (i = 0; i < cb->int_max; i++) { | ||
58 | if (cb->irq_map[i] == IRQ_FREE) { | ||
59 | cb->irq_map[i] = cb_no; | ||
60 | return i; | ||
61 | } | ||
62 | } | ||
63 | |||
64 | return -ENODEV; | ||
65 | } | ||
66 | |||
67 | static int crossbar_domain_map(struct irq_domain *d, unsigned int irq, | ||
68 | irq_hw_number_t hw) | ||
69 | { | ||
70 | cb->write(hw - GIC_IRQ_START, cb->irq_map[hw - GIC_IRQ_START]); | ||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | static void crossbar_domain_unmap(struct irq_domain *d, unsigned int irq) | ||
75 | { | ||
76 | irq_hw_number_t hw = irq_get_irq_data(irq)->hwirq; | ||
77 | |||
78 | if (hw > GIC_IRQ_START) | ||
79 | cb->irq_map[hw - GIC_IRQ_START] = IRQ_FREE; | ||
80 | } | ||
81 | |||
82 | static int crossbar_domain_xlate(struct irq_domain *d, | ||
83 | struct device_node *controller, | ||
84 | const u32 *intspec, unsigned int intsize, | ||
85 | unsigned long *out_hwirq, | ||
86 | unsigned int *out_type) | ||
87 | { | ||
88 | unsigned long ret; | ||
89 | |||
90 | ret = allocate_free_irq(intspec[1]); | ||
91 | |||
92 | if (IS_ERR_VALUE(ret)) | ||
93 | return ret; | ||
94 | |||
95 | *out_hwirq = ret + GIC_IRQ_START; | ||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | const struct irq_domain_ops routable_irq_domain_ops = { | ||
100 | .map = crossbar_domain_map, | ||
101 | .unmap = crossbar_domain_unmap, | ||
102 | .xlate = crossbar_domain_xlate | ||
103 | }; | ||
104 | |||
105 | static int __init crossbar_of_init(struct device_node *node) | ||
106 | { | ||
107 | int i, size, max, reserved = 0, entry; | ||
108 | const __be32 *irqsr; | ||
109 | |||
110 | cb = kzalloc(sizeof(struct cb_device *), GFP_KERNEL); | ||
111 | |||
112 | if (!cb) | ||
113 | return -ENOMEM; | ||
114 | |||
115 | cb->crossbar_base = of_iomap(node, 0); | ||
116 | if (!cb->crossbar_base) | ||
117 | goto err1; | ||
118 | |||
119 | of_property_read_u32(node, "ti,max-irqs", &max); | ||
120 | cb->irq_map = kzalloc(max * sizeof(int), GFP_KERNEL); | ||
121 | if (!cb->irq_map) | ||
122 | goto err2; | ||
123 | |||
124 | cb->int_max = max; | ||
125 | |||
126 | for (i = 0; i < max; i++) | ||
127 | cb->irq_map[i] = IRQ_FREE; | ||
128 | |||
129 | /* Get and mark reserved irqs */ | ||
130 | irqsr = of_get_property(node, "ti,irqs-reserved", &size); | ||
131 | if (irqsr) { | ||
132 | size /= sizeof(__be32); | ||
133 | |||
134 | for (i = 0; i < size; i++) { | ||
135 | of_property_read_u32_index(node, | ||
136 | "ti,irqs-reserved", | ||
137 | i, &entry); | ||
138 | if (entry > max) { | ||
139 | pr_err("Invalid reserved entry\n"); | ||
140 | goto err3; | ||
141 | } | ||
142 | cb->irq_map[entry] = 0; | ||
143 | } | ||
144 | } | ||
145 | |||
146 | cb->register_offsets = kzalloc(max * sizeof(int), GFP_KERNEL); | ||
147 | if (!cb->register_offsets) | ||
148 | goto err3; | ||
149 | |||
150 | of_property_read_u32(node, "ti,reg-size", &size); | ||
151 | |||
152 | switch (size) { | ||
153 | case 1: | ||
154 | cb->write = crossbar_writeb; | ||
155 | break; | ||
156 | case 2: | ||
157 | cb->write = crossbar_writew; | ||
158 | break; | ||
159 | case 4: | ||
160 | cb->write = crossbar_writel; | ||
161 | break; | ||
162 | default: | ||
163 | pr_err("Invalid reg-size property\n"); | ||
164 | goto err4; | ||
165 | break; | ||
166 | } | ||
167 | |||
168 | /* | ||
169 | * Register offsets are not linear because of the | ||
170 | * reserved irqs. so find and store the offsets once. | ||
171 | */ | ||
172 | for (i = 0; i < max; i++) { | ||
173 | if (!cb->irq_map[i]) | ||
174 | continue; | ||
175 | |||
176 | cb->register_offsets[i] = reserved; | ||
177 | reserved += size; | ||
178 | } | ||
179 | |||
180 | register_routable_domain_ops(&routable_irq_domain_ops); | ||
181 | return 0; | ||
182 | |||
183 | err4: | ||
184 | kfree(cb->register_offsets); | ||
185 | err3: | ||
186 | kfree(cb->irq_map); | ||
187 | err2: | ||
188 | iounmap(cb->crossbar_base); | ||
189 | err1: | ||
190 | kfree(cb); | ||
191 | return -ENOMEM; | ||
192 | } | ||
193 | |||
194 | static const struct of_device_id crossbar_match[] __initconst = { | ||
195 | { .compatible = "ti,irq-crossbar" }, | ||
196 | {} | ||
197 | }; | ||
198 | |||
199 | int __init irqcrossbar_init(void) | ||
200 | { | ||
201 | struct device_node *np; | ||
202 | np = of_find_matching_node(NULL, crossbar_match); | ||
203 | if (!np) | ||
204 | return -ENODEV; | ||
205 | |||
206 | crossbar_of_init(np); | ||
207 | return 0; | ||
208 | } | ||
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 63922b9ba6b7..4300b6606f5e 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
@@ -824,16 +824,25 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, | |||
824 | irq_set_chip_and_handler(irq, &gic_chip, | 824 | irq_set_chip_and_handler(irq, &gic_chip, |
825 | handle_fasteoi_irq); | 825 | handle_fasteoi_irq); |
826 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 826 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
827 | |||
828 | gic_routable_irq_domain_ops->map(d, irq, hw); | ||
827 | } | 829 | } |
828 | irq_set_chip_data(irq, d->host_data); | 830 | irq_set_chip_data(irq, d->host_data); |
829 | return 0; | 831 | return 0; |
830 | } | 832 | } |
831 | 833 | ||
834 | static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq) | ||
835 | { | ||
836 | gic_routable_irq_domain_ops->unmap(d, irq); | ||
837 | } | ||
838 | |||
832 | static int gic_irq_domain_xlate(struct irq_domain *d, | 839 | static int gic_irq_domain_xlate(struct irq_domain *d, |
833 | struct device_node *controller, | 840 | struct device_node *controller, |
834 | const u32 *intspec, unsigned int intsize, | 841 | const u32 *intspec, unsigned int intsize, |
835 | unsigned long *out_hwirq, unsigned int *out_type) | 842 | unsigned long *out_hwirq, unsigned int *out_type) |
836 | { | 843 | { |
844 | unsigned long ret = 0; | ||
845 | |||
837 | if (d->of_node != controller) | 846 | if (d->of_node != controller) |
838 | return -EINVAL; | 847 | return -EINVAL; |
839 | if (intsize < 3) | 848 | if (intsize < 3) |
@@ -843,11 +852,20 @@ static int gic_irq_domain_xlate(struct irq_domain *d, | |||
843 | *out_hwirq = intspec[1] + 16; | 852 | *out_hwirq = intspec[1] + 16; |
844 | 853 | ||
845 | /* For SPIs, we need to add 16 more to get the GIC irq ID number */ | 854 | /* For SPIs, we need to add 16 more to get the GIC irq ID number */ |
846 | if (!intspec[0]) | 855 | if (!intspec[0]) { |
847 | *out_hwirq += 16; | 856 | ret = gic_routable_irq_domain_ops->xlate(d, controller, |
857 | intspec, | ||
858 | intsize, | ||
859 | out_hwirq, | ||
860 | out_type); | ||
861 | |||
862 | if (IS_ERR_VALUE(ret)) | ||
863 | return ret; | ||
864 | } | ||
848 | 865 | ||
849 | *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; | 866 | *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; |
850 | return 0; | 867 | |
868 | return ret; | ||
851 | } | 869 | } |
852 | 870 | ||
853 | #ifdef CONFIG_SMP | 871 | #ifdef CONFIG_SMP |
@@ -871,9 +889,41 @@ static struct notifier_block gic_cpu_notifier = { | |||
871 | 889 | ||
872 | static const struct irq_domain_ops gic_irq_domain_ops = { | 890 | static const struct irq_domain_ops gic_irq_domain_ops = { |
873 | .map = gic_irq_domain_map, | 891 | .map = gic_irq_domain_map, |
892 | .unmap = gic_irq_domain_unmap, | ||
874 | .xlate = gic_irq_domain_xlate, | 893 | .xlate = gic_irq_domain_xlate, |
875 | }; | 894 | }; |
876 | 895 | ||
896 | /* Default functions for routable irq domain */ | ||
897 | static int gic_routable_irq_domain_map(struct irq_domain *d, unsigned int irq, | ||
898 | irq_hw_number_t hw) | ||
899 | { | ||
900 | return 0; | ||
901 | } | ||
902 | |||
903 | static void gic_routable_irq_domain_unmap(struct irq_domain *d, | ||
904 | unsigned int irq) | ||
905 | { | ||
906 | } | ||
907 | |||
908 | static int gic_routable_irq_domain_xlate(struct irq_domain *d, | ||
909 | struct device_node *controller, | ||
910 | const u32 *intspec, unsigned int intsize, | ||
911 | unsigned long *out_hwirq, | ||
912 | unsigned int *out_type) | ||
913 | { | ||
914 | *out_hwirq += 16; | ||
915 | return 0; | ||
916 | } | ||
917 | |||
918 | const struct irq_domain_ops gic_default_routable_irq_domain_ops = { | ||
919 | .map = gic_routable_irq_domain_map, | ||
920 | .unmap = gic_routable_irq_domain_unmap, | ||
921 | .xlate = gic_routable_irq_domain_xlate, | ||
922 | }; | ||
923 | |||
924 | const struct irq_domain_ops *gic_routable_irq_domain_ops = | ||
925 | &gic_default_routable_irq_domain_ops; | ||
926 | |||
877 | void __init gic_init_bases(unsigned int gic_nr, int irq_start, | 927 | void __init gic_init_bases(unsigned int gic_nr, int irq_start, |
878 | void __iomem *dist_base, void __iomem *cpu_base, | 928 | void __iomem *dist_base, void __iomem *cpu_base, |
879 | u32 percpu_offset, struct device_node *node) | 929 | u32 percpu_offset, struct device_node *node) |
@@ -881,6 +931,7 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, | |||
881 | irq_hw_number_t hwirq_base; | 931 | irq_hw_number_t hwirq_base; |
882 | struct gic_chip_data *gic; | 932 | struct gic_chip_data *gic; |
883 | int gic_irqs, irq_base, i; | 933 | int gic_irqs, irq_base, i; |
934 | int nr_routable_irqs; | ||
884 | 935 | ||
885 | BUG_ON(gic_nr >= MAX_GIC_NR); | 936 | BUG_ON(gic_nr >= MAX_GIC_NR); |
886 | 937 | ||
@@ -946,14 +997,25 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, | |||
946 | gic->gic_irqs = gic_irqs; | 997 | gic->gic_irqs = gic_irqs; |
947 | 998 | ||
948 | gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ | 999 | gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ |
949 | irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id()); | 1000 | |
950 | if (IS_ERR_VALUE(irq_base)) { | 1001 | if (of_property_read_u32(node, "arm,routable-irqs", |
951 | WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", | 1002 | &nr_routable_irqs)) { |
952 | irq_start); | 1003 | irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, |
953 | irq_base = irq_start; | 1004 | numa_node_id()); |
1005 | if (IS_ERR_VALUE(irq_base)) { | ||
1006 | WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", | ||
1007 | irq_start); | ||
1008 | irq_base = irq_start; | ||
1009 | } | ||
1010 | |||
1011 | gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, | ||
1012 | hwirq_base, &gic_irq_domain_ops, gic); | ||
1013 | } else { | ||
1014 | gic->domain = irq_domain_add_linear(node, nr_routable_irqs, | ||
1015 | &gic_irq_domain_ops, | ||
1016 | gic); | ||
954 | } | 1017 | } |
955 | gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, | 1018 | |
956 | hwirq_base, &gic_irq_domain_ops, gic); | ||
957 | if (WARN_ON(!gic->domain)) | 1019 | if (WARN_ON(!gic->domain)) |
958 | return; | 1020 | return; |
959 | 1021 | ||
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index 473f09a74d4d..37dab0b472cd 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c | |||
@@ -57,6 +57,7 @@ | |||
57 | 57 | ||
58 | /** | 58 | /** |
59 | * struct vic_device - VIC PM device | 59 | * struct vic_device - VIC PM device |
60 | * @parent_irq: The parent IRQ number of the VIC if cascaded, or 0. | ||
60 | * @irq: The IRQ number for the base of the VIC. | 61 | * @irq: The IRQ number for the base of the VIC. |
61 | * @base: The register base for the VIC. | 62 | * @base: The register base for the VIC. |
62 | * @valid_sources: A bitmask of valid interrupts | 63 | * @valid_sources: A bitmask of valid interrupts |
@@ -224,6 +225,17 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs) | |||
224 | return handled; | 225 | return handled; |
225 | } | 226 | } |
226 | 227 | ||
228 | static void vic_handle_irq_cascaded(unsigned int irq, struct irq_desc *desc) | ||
229 | { | ||
230 | u32 stat, hwirq; | ||
231 | struct vic_device *vic = irq_desc_get_handler_data(desc); | ||
232 | |||
233 | while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) { | ||
234 | hwirq = ffs(stat) - 1; | ||
235 | generic_handle_irq(irq_find_mapping(vic->domain, hwirq)); | ||
236 | } | ||
237 | } | ||
238 | |||
227 | /* | 239 | /* |
228 | * Keep iterating over all registered VIC's until there are no pending | 240 | * Keep iterating over all registered VIC's until there are no pending |
229 | * interrupts. | 241 | * interrupts. |
@@ -246,6 +258,7 @@ static struct irq_domain_ops vic_irqdomain_ops = { | |||
246 | /** | 258 | /** |
247 | * vic_register() - Register a VIC. | 259 | * vic_register() - Register a VIC. |
248 | * @base: The base address of the VIC. | 260 | * @base: The base address of the VIC. |
261 | * @parent_irq: The parent IRQ if cascaded, else 0. | ||
249 | * @irq: The base IRQ for the VIC. | 262 | * @irq: The base IRQ for the VIC. |
250 | * @valid_sources: bitmask of valid interrupts | 263 | * @valid_sources: bitmask of valid interrupts |
251 | * @resume_sources: bitmask of interrupts allowed for resume sources. | 264 | * @resume_sources: bitmask of interrupts allowed for resume sources. |
@@ -257,7 +270,8 @@ static struct irq_domain_ops vic_irqdomain_ops = { | |||
257 | * | 270 | * |
258 | * This also configures the IRQ domain for the VIC. | 271 | * This also configures the IRQ domain for the VIC. |
259 | */ | 272 | */ |
260 | static void __init vic_register(void __iomem *base, unsigned int irq, | 273 | static void __init vic_register(void __iomem *base, unsigned int parent_irq, |
274 | unsigned int irq, | ||
261 | u32 valid_sources, u32 resume_sources, | 275 | u32 valid_sources, u32 resume_sources, |
262 | struct device_node *node) | 276 | struct device_node *node) |
263 | { | 277 | { |
@@ -273,15 +287,25 @@ static void __init vic_register(void __iomem *base, unsigned int irq, | |||
273 | v->base = base; | 287 | v->base = base; |
274 | v->valid_sources = valid_sources; | 288 | v->valid_sources = valid_sources; |
275 | v->resume_sources = resume_sources; | 289 | v->resume_sources = resume_sources; |
276 | v->irq = irq; | ||
277 | set_handle_irq(vic_handle_irq); | 290 | set_handle_irq(vic_handle_irq); |
278 | vic_id++; | 291 | vic_id++; |
292 | |||
293 | if (parent_irq) { | ||
294 | irq_set_handler_data(parent_irq, v); | ||
295 | irq_set_chained_handler(parent_irq, vic_handle_irq_cascaded); | ||
296 | } | ||
297 | |||
279 | v->domain = irq_domain_add_simple(node, fls(valid_sources), irq, | 298 | v->domain = irq_domain_add_simple(node, fls(valid_sources), irq, |
280 | &vic_irqdomain_ops, v); | 299 | &vic_irqdomain_ops, v); |
281 | /* create an IRQ mapping for each valid IRQ */ | 300 | /* create an IRQ mapping for each valid IRQ */ |
282 | for (i = 0; i < fls(valid_sources); i++) | 301 | for (i = 0; i < fls(valid_sources); i++) |
283 | if (valid_sources & (1 << i)) | 302 | if (valid_sources & (1 << i)) |
284 | irq_create_mapping(v->domain, i); | 303 | irq_create_mapping(v->domain, i); |
304 | /* If no base IRQ was passed, figure out our allocated base */ | ||
305 | if (irq) | ||
306 | v->irq = irq; | ||
307 | else | ||
308 | v->irq = irq_find_mapping(v->domain, 0); | ||
285 | } | 309 | } |
286 | 310 | ||
287 | static void vic_ack_irq(struct irq_data *d) | 311 | static void vic_ack_irq(struct irq_data *d) |
@@ -409,10 +433,10 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start, | |||
409 | writel(32, base + VIC_PL190_DEF_VECT_ADDR); | 433 | writel(32, base + VIC_PL190_DEF_VECT_ADDR); |
410 | } | 434 | } |
411 | 435 | ||
412 | vic_register(base, irq_start, vic_sources, 0, node); | 436 | vic_register(base, 0, irq_start, vic_sources, 0, node); |
413 | } | 437 | } |
414 | 438 | ||
415 | void __init __vic_init(void __iomem *base, int irq_start, | 439 | void __init __vic_init(void __iomem *base, int parent_irq, int irq_start, |
416 | u32 vic_sources, u32 resume_sources, | 440 | u32 vic_sources, u32 resume_sources, |
417 | struct device_node *node) | 441 | struct device_node *node) |
418 | { | 442 | { |
@@ -449,7 +473,7 @@ void __init __vic_init(void __iomem *base, int irq_start, | |||
449 | 473 | ||
450 | vic_init2(base); | 474 | vic_init2(base); |
451 | 475 | ||
452 | vic_register(base, irq_start, vic_sources, resume_sources, node); | 476 | vic_register(base, parent_irq, irq_start, vic_sources, resume_sources, node); |
453 | } | 477 | } |
454 | 478 | ||
455 | /** | 479 | /** |
@@ -462,8 +486,30 @@ void __init __vic_init(void __iomem *base, int irq_start, | |||
462 | void __init vic_init(void __iomem *base, unsigned int irq_start, | 486 | void __init vic_init(void __iomem *base, unsigned int irq_start, |
463 | u32 vic_sources, u32 resume_sources) | 487 | u32 vic_sources, u32 resume_sources) |
464 | { | 488 | { |
465 | __vic_init(base, irq_start, vic_sources, resume_sources, NULL); | 489 | __vic_init(base, 0, irq_start, vic_sources, resume_sources, NULL); |
490 | } | ||
491 | |||
492 | /** | ||
493 | * vic_init_cascaded() - initialise a cascaded vectored interrupt controller | ||
494 | * @base: iomem base address | ||
495 | * @parent_irq: the parent IRQ we're cascaded off | ||
496 | * @irq_start: starting interrupt number, must be muliple of 32 | ||
497 | * @vic_sources: bitmask of interrupt sources to allow | ||
498 | * @resume_sources: bitmask of interrupt sources to allow for resume | ||
499 | * | ||
500 | * This returns the base for the new interrupts or negative on error. | ||
501 | */ | ||
502 | int __init vic_init_cascaded(void __iomem *base, unsigned int parent_irq, | ||
503 | u32 vic_sources, u32 resume_sources) | ||
504 | { | ||
505 | struct vic_device *v; | ||
506 | |||
507 | v = &vic_devices[vic_id]; | ||
508 | __vic_init(base, parent_irq, 0, vic_sources, resume_sources, NULL); | ||
509 | /* Return out acquired base */ | ||
510 | return v->irq; | ||
466 | } | 511 | } |
512 | EXPORT_SYMBOL_GPL(vic_init_cascaded); | ||
467 | 513 | ||
468 | #ifdef CONFIG_OF | 514 | #ifdef CONFIG_OF |
469 | int __init vic_of_init(struct device_node *node, struct device_node *parent) | 515 | int __init vic_of_init(struct device_node *node, struct device_node *parent) |
@@ -485,7 +531,7 @@ int __init vic_of_init(struct device_node *node, struct device_node *parent) | |||
485 | /* | 531 | /* |
486 | * Passing 0 as first IRQ makes the simple domain allocate descriptors | 532 | * Passing 0 as first IRQ makes the simple domain allocate descriptors |
487 | */ | 533 | */ |
488 | __vic_init(regs, 0, interrupt_mask, wakeup_mask, node); | 534 | __vic_init(regs, 0, 0, interrupt_mask, wakeup_mask, node); |
489 | 535 | ||
490 | return 0; | 536 | return 0; |
491 | } | 537 | } |
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 72156c123033..44c358ecf5a1 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig | |||
@@ -421,7 +421,7 @@ config LEDS_MC13783 | |||
421 | config LEDS_NS2 | 421 | config LEDS_NS2 |
422 | tristate "LED support for Network Space v2 GPIO LEDs" | 422 | tristate "LED support for Network Space v2 GPIO LEDs" |
423 | depends on LEDS_CLASS | 423 | depends on LEDS_CLASS |
424 | depends on ARCH_KIRKWOOD | 424 | depends on ARCH_KIRKWOOD || MACH_KIRKWOOD |
425 | default y | 425 | default y |
426 | help | 426 | help |
427 | This option enable support for the dual-GPIO LED found on the | 427 | This option enable support for the dual-GPIO LED found on the |
@@ -431,7 +431,7 @@ config LEDS_NS2 | |||
431 | config LEDS_NETXBIG | 431 | config LEDS_NETXBIG |
432 | tristate "LED support for Big Network series LEDs" | 432 | tristate "LED support for Big Network series LEDs" |
433 | depends on LEDS_CLASS | 433 | depends on LEDS_CLASS |
434 | depends on ARCH_KIRKWOOD | 434 | depends on ARCH_KIRKWOOD || MACH_KIRKWOOD |
435 | default y | 435 | default y |
436 | help | 436 | help |
437 | This option enable support for LEDs found on the LaCie 2Big | 437 | This option enable support for LEDs found on the LaCie 2Big |
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index a4989ec6292e..8eb6a36f125a 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c | |||
@@ -746,28 +746,6 @@ static int nand_davinci_probe(struct platform_device *pdev) | |||
746 | goto err_clk_enable; | 746 | goto err_clk_enable; |
747 | } | 747 | } |
748 | 748 | ||
749 | /* | ||
750 | * Setup Async configuration register in case we did not boot from | ||
751 | * NAND and so bootloader did not bother to set it up. | ||
752 | */ | ||
753 | val = davinci_nand_readl(info, A1CR_OFFSET + info->core_chipsel * 4); | ||
754 | |||
755 | /* Extended Wait is not valid and Select Strobe mode is not used */ | ||
756 | val &= ~(ACR_ASIZE_MASK | ACR_EW_MASK | ACR_SS_MASK); | ||
757 | if (info->chip.options & NAND_BUSWIDTH_16) | ||
758 | val |= 0x1; | ||
759 | |||
760 | davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val); | ||
761 | |||
762 | ret = 0; | ||
763 | if (info->timing) | ||
764 | ret = davinci_aemif_setup_timing(info->timing, info->base, | ||
765 | info->core_chipsel); | ||
766 | if (ret < 0) { | ||
767 | dev_dbg(&pdev->dev, "NAND timing values setup fail\n"); | ||
768 | goto err; | ||
769 | } | ||
770 | |||
771 | spin_lock_irq(&davinci_nand_lock); | 749 | spin_lock_irq(&davinci_nand_lock); |
772 | 750 | ||
773 | /* put CSxNAND into NAND mode */ | 751 | /* put CSxNAND into NAND mode */ |
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index 8d3c49cc500f..3bb05f17b9b4 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig | |||
@@ -27,7 +27,7 @@ config PHY_EXYNOS_MIPI_VIDEO | |||
27 | 27 | ||
28 | config PHY_MVEBU_SATA | 28 | config PHY_MVEBU_SATA |
29 | def_bool y | 29 | def_bool y |
30 | depends on ARCH_KIRKWOOD || ARCH_DOVE || MACH_DOVE | 30 | depends on ARCH_KIRKWOOD || ARCH_DOVE || MACH_DOVE || MACH_KIRKWOOD |
31 | depends on OF | 31 | depends on OF |
32 | select GENERIC_PHY | 32 | select GENERIC_PHY |
33 | 33 | ||
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index 6d452a78b19c..fa0e4e057b99 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig | |||
@@ -22,7 +22,7 @@ config POWER_RESET_GPIO | |||
22 | 22 | ||
23 | config POWER_RESET_MSM | 23 | config POWER_RESET_MSM |
24 | bool "Qualcomm MSM power-off driver" | 24 | bool "Qualcomm MSM power-off driver" |
25 | depends on POWER_RESET && ARCH_MSM | 25 | depends on POWER_RESET && ARCH_QCOM |
26 | help | 26 | help |
27 | Power off and restart support for Qualcomm boards. | 27 | Power off and restart support for Qualcomm boards. |
28 | 28 | ||
diff --git a/drivers/power/reset/qnap-poweroff.c b/drivers/power/reset/qnap-poweroff.c index 37f56f7ee926..a75db7f8a92f 100644 --- a/drivers/power/reset/qnap-poweroff.c +++ b/drivers/power/reset/qnap-poweroff.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * QNAP Turbo NAS Board power off | 2 | * QNAP Turbo NAS Board power off. Can also be used on Synology devices. |
3 | * | 3 | * |
4 | * Copyright (C) 2012 Andrew Lunn <andrew@lunn.ch> | 4 | * Copyright (C) 2012 Andrew Lunn <andrew@lunn.ch> |
5 | * | 5 | * |
@@ -25,17 +25,43 @@ | |||
25 | 25 | ||
26 | #define UART1_REG(x) (base + ((UART_##x) << 2)) | 26 | #define UART1_REG(x) (base + ((UART_##x) << 2)) |
27 | 27 | ||
28 | struct power_off_cfg { | ||
29 | u32 baud; | ||
30 | char cmd; | ||
31 | }; | ||
32 | |||
33 | static const struct power_off_cfg qnap_power_off_cfg = { | ||
34 | .baud = 19200, | ||
35 | .cmd = 'A', | ||
36 | }; | ||
37 | |||
38 | static const struct power_off_cfg synology_power_off_cfg = { | ||
39 | .baud = 9600, | ||
40 | .cmd = '1', | ||
41 | }; | ||
42 | |||
43 | static const struct of_device_id qnap_power_off_of_match_table[] = { | ||
44 | { .compatible = "qnap,power-off", | ||
45 | .data = &qnap_power_off_cfg, | ||
46 | }, | ||
47 | { .compatible = "synology,power-off", | ||
48 | .data = &synology_power_off_cfg, | ||
49 | }, | ||
50 | {} | ||
51 | }; | ||
52 | MODULE_DEVICE_TABLE(of, qnap_power_off_of_match_table); | ||
53 | |||
28 | static void __iomem *base; | 54 | static void __iomem *base; |
29 | static unsigned long tclk; | 55 | static unsigned long tclk; |
56 | static const struct power_off_cfg *cfg; | ||
30 | 57 | ||
31 | static void qnap_power_off(void) | 58 | static void qnap_power_off(void) |
32 | { | 59 | { |
33 | /* 19200 baud divisor */ | 60 | const unsigned divisor = ((tclk + (8 * cfg->baud)) / (16 * cfg->baud)); |
34 | const unsigned divisor = ((tclk + (8 * 19200)) / (16 * 19200)); | ||
35 | 61 | ||
36 | pr_err("%s: triggering power-off...\n", __func__); | 62 | pr_err("%s: triggering power-off...\n", __func__); |
37 | 63 | ||
38 | /* hijack UART1 and reset into sane state (19200,8n1) */ | 64 | /* hijack UART1 and reset into sane state */ |
39 | writel(0x83, UART1_REG(LCR)); | 65 | writel(0x83, UART1_REG(LCR)); |
40 | writel(divisor & 0xff, UART1_REG(DLL)); | 66 | writel(divisor & 0xff, UART1_REG(DLL)); |
41 | writel((divisor >> 8) & 0xff, UART1_REG(DLM)); | 67 | writel((divisor >> 8) & 0xff, UART1_REG(DLM)); |
@@ -44,16 +70,21 @@ static void qnap_power_off(void) | |||
44 | writel(0x00, UART1_REG(FCR)); | 70 | writel(0x00, UART1_REG(FCR)); |
45 | writel(0x00, UART1_REG(MCR)); | 71 | writel(0x00, UART1_REG(MCR)); |
46 | 72 | ||
47 | /* send the power-off command 'A' to PIC */ | 73 | /* send the power-off command to PIC */ |
48 | writel('A', UART1_REG(TX)); | 74 | writel(cfg->cmd, UART1_REG(TX)); |
49 | } | 75 | } |
50 | 76 | ||
51 | static int qnap_power_off_probe(struct platform_device *pdev) | 77 | static int qnap_power_off_probe(struct platform_device *pdev) |
52 | { | 78 | { |
79 | struct device_node *np = pdev->dev.of_node; | ||
53 | struct resource *res; | 80 | struct resource *res; |
54 | struct clk *clk; | 81 | struct clk *clk; |
55 | char symname[KSYM_NAME_LEN]; | 82 | char symname[KSYM_NAME_LEN]; |
56 | 83 | ||
84 | const struct of_device_id *match = | ||
85 | of_match_node(qnap_power_off_of_match_table, np); | ||
86 | cfg = match->data; | ||
87 | |||
57 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 88 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
58 | if (!res) { | 89 | if (!res) { |
59 | dev_err(&pdev->dev, "Missing resource"); | 90 | dev_err(&pdev->dev, "Missing resource"); |
@@ -94,12 +125,6 @@ static int qnap_power_off_remove(struct platform_device *pdev) | |||
94 | return 0; | 125 | return 0; |
95 | } | 126 | } |
96 | 127 | ||
97 | static const struct of_device_id qnap_power_off_of_match_table[] = { | ||
98 | { .compatible = "qnap,power-off", }, | ||
99 | {} | ||
100 | }; | ||
101 | MODULE_DEVICE_TABLE(of, qnap_power_off_of_match_table); | ||
102 | |||
103 | static struct platform_driver qnap_power_off_driver = { | 128 | static struct platform_driver qnap_power_off_driver = { |
104 | .probe = qnap_power_off_probe, | 129 | .probe = qnap_power_off_probe, |
105 | .remove = qnap_power_off_remove, | 130 | .remove = qnap_power_off_remove, |
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index c9d04f797862..0615f50a14cd 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig | |||
@@ -11,3 +11,5 @@ menuconfig RESET_CONTROLLER | |||
11 | via GPIOs or SoC-internal reset controller modules. | 11 | via GPIOs or SoC-internal reset controller modules. |
12 | 12 | ||
13 | If unsure, say no. | 13 | If unsure, say no. |
14 | |||
15 | source "drivers/reset/sti/Kconfig" | ||
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index cc29832c9638..4f60caf750ce 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile | |||
@@ -1,2 +1,3 @@ | |||
1 | obj-$(CONFIG_RESET_CONTROLLER) += core.o | 1 | obj-$(CONFIG_RESET_CONTROLLER) += core.o |
2 | obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o | 2 | obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o |
3 | obj-$(CONFIG_ARCH_STI) += sti/ | ||
diff --git a/drivers/reset/core.c b/drivers/reset/core.c index d1b6089a0ef8..baeaf82d40d9 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c | |||
@@ -43,7 +43,7 @@ struct reset_control { | |||
43 | * This simple translation function should be used for reset controllers | 43 | * This simple translation function should be used for reset controllers |
44 | * with 1:1 mapping, where reset lines can be indexed by number without gaps. | 44 | * with 1:1 mapping, where reset lines can be indexed by number without gaps. |
45 | */ | 45 | */ |
46 | int of_reset_simple_xlate(struct reset_controller_dev *rcdev, | 46 | static int of_reset_simple_xlate(struct reset_controller_dev *rcdev, |
47 | const struct of_phandle_args *reset_spec) | 47 | const struct of_phandle_args *reset_spec) |
48 | { | 48 | { |
49 | if (WARN_ON(reset_spec->args_count != rcdev->of_reset_n_cells)) | 49 | if (WARN_ON(reset_spec->args_count != rcdev->of_reset_n_cells)) |
@@ -54,7 +54,6 @@ int of_reset_simple_xlate(struct reset_controller_dev *rcdev, | |||
54 | 54 | ||
55 | return reset_spec->args[0]; | 55 | return reset_spec->args[0]; |
56 | } | 56 | } |
57 | EXPORT_SYMBOL_GPL(of_reset_simple_xlate); | ||
58 | 57 | ||
59 | /** | 58 | /** |
60 | * reset_controller_register - register a reset controller device | 59 | * reset_controller_register - register a reset controller device |
@@ -127,15 +126,16 @@ int reset_control_deassert(struct reset_control *rstc) | |||
127 | EXPORT_SYMBOL_GPL(reset_control_deassert); | 126 | EXPORT_SYMBOL_GPL(reset_control_deassert); |
128 | 127 | ||
129 | /** | 128 | /** |
130 | * reset_control_get - Lookup and obtain a reference to a reset controller. | 129 | * of_reset_control_get - Lookup and obtain a reference to a reset controller. |
131 | * @dev: device to be reset by the controller | 130 | * @node: device to be reset by the controller |
132 | * @id: reset line name | 131 | * @id: reset line name |
133 | * | 132 | * |
134 | * Returns a struct reset_control or IS_ERR() condition containing errno. | 133 | * Returns a struct reset_control or IS_ERR() condition containing errno. |
135 | * | 134 | * |
136 | * Use of id names is optional. | 135 | * Use of id names is optional. |
137 | */ | 136 | */ |
138 | struct reset_control *reset_control_get(struct device *dev, const char *id) | 137 | struct reset_control *of_reset_control_get(struct device_node *node, |
138 | const char *id) | ||
139 | { | 139 | { |
140 | struct reset_control *rstc = ERR_PTR(-EPROBE_DEFER); | 140 | struct reset_control *rstc = ERR_PTR(-EPROBE_DEFER); |
141 | struct reset_controller_dev *r, *rcdev; | 141 | struct reset_controller_dev *r, *rcdev; |
@@ -144,13 +144,10 @@ struct reset_control *reset_control_get(struct device *dev, const char *id) | |||
144 | int rstc_id; | 144 | int rstc_id; |
145 | int ret; | 145 | int ret; |
146 | 146 | ||
147 | if (!dev) | ||
148 | return ERR_PTR(-EINVAL); | ||
149 | |||
150 | if (id) | 147 | if (id) |
151 | index = of_property_match_string(dev->of_node, | 148 | index = of_property_match_string(node, |
152 | "reset-names", id); | 149 | "reset-names", id); |
153 | ret = of_parse_phandle_with_args(dev->of_node, "resets", "#reset-cells", | 150 | ret = of_parse_phandle_with_args(node, "resets", "#reset-cells", |
154 | index, &args); | 151 | index, &args); |
155 | if (ret) | 152 | if (ret) |
156 | return ERR_PTR(ret); | 153 | return ERR_PTR(ret); |
@@ -167,7 +164,7 @@ struct reset_control *reset_control_get(struct device *dev, const char *id) | |||
167 | 164 | ||
168 | if (!rcdev) { | 165 | if (!rcdev) { |
169 | mutex_unlock(&reset_controller_list_mutex); | 166 | mutex_unlock(&reset_controller_list_mutex); |
170 | return ERR_PTR(-ENODEV); | 167 | return ERR_PTR(-EPROBE_DEFER); |
171 | } | 168 | } |
172 | 169 | ||
173 | rstc_id = rcdev->of_xlate(rcdev, &args); | 170 | rstc_id = rcdev->of_xlate(rcdev, &args); |
@@ -185,12 +182,35 @@ struct reset_control *reset_control_get(struct device *dev, const char *id) | |||
185 | return ERR_PTR(-ENOMEM); | 182 | return ERR_PTR(-ENOMEM); |
186 | } | 183 | } |
187 | 184 | ||
188 | rstc->dev = dev; | ||
189 | rstc->rcdev = rcdev; | 185 | rstc->rcdev = rcdev; |
190 | rstc->id = rstc_id; | 186 | rstc->id = rstc_id; |
191 | 187 | ||
192 | return rstc; | 188 | return rstc; |
193 | } | 189 | } |
190 | EXPORT_SYMBOL_GPL(of_reset_control_get); | ||
191 | |||
192 | /** | ||
193 | * reset_control_get - Lookup and obtain a reference to a reset controller. | ||
194 | * @dev: device to be reset by the controller | ||
195 | * @id: reset line name | ||
196 | * | ||
197 | * Returns a struct reset_control or IS_ERR() condition containing errno. | ||
198 | * | ||
199 | * Use of id names is optional. | ||
200 | */ | ||
201 | struct reset_control *reset_control_get(struct device *dev, const char *id) | ||
202 | { | ||
203 | struct reset_control *rstc; | ||
204 | |||
205 | if (!dev) | ||
206 | return ERR_PTR(-EINVAL); | ||
207 | |||
208 | rstc = of_reset_control_get(dev->of_node, id); | ||
209 | if (!IS_ERR(rstc)) | ||
210 | rstc->dev = dev; | ||
211 | |||
212 | return rstc; | ||
213 | } | ||
194 | EXPORT_SYMBOL_GPL(reset_control_get); | 214 | EXPORT_SYMBOL_GPL(reset_control_get); |
195 | 215 | ||
196 | /** | 216 | /** |
@@ -243,33 +263,6 @@ struct reset_control *devm_reset_control_get(struct device *dev, const char *id) | |||
243 | } | 263 | } |
244 | EXPORT_SYMBOL_GPL(devm_reset_control_get); | 264 | EXPORT_SYMBOL_GPL(devm_reset_control_get); |
245 | 265 | ||
246 | static int devm_reset_control_match(struct device *dev, void *res, void *data) | ||
247 | { | ||
248 | struct reset_control **rstc = res; | ||
249 | if (WARN_ON(!rstc || !*rstc)) | ||
250 | return 0; | ||
251 | return *rstc == data; | ||
252 | } | ||
253 | |||
254 | /** | ||
255 | * devm_reset_control_put - resource managed reset_control_put() | ||
256 | * @rstc: reset controller to free | ||
257 | * | ||
258 | * Deallocate a reset control allocated withd devm_reset_control_get(). | ||
259 | * This function will not need to be called normally, as devres will take | ||
260 | * care of freeing the resource. | ||
261 | */ | ||
262 | void devm_reset_control_put(struct reset_control *rstc) | ||
263 | { | ||
264 | int ret; | ||
265 | |||
266 | ret = devres_release(rstc->dev, devm_reset_control_release, | ||
267 | devm_reset_control_match, rstc); | ||
268 | if (ret) | ||
269 | WARN_ON(ret); | ||
270 | } | ||
271 | EXPORT_SYMBOL_GPL(devm_reset_control_put); | ||
272 | |||
273 | /** | 266 | /** |
274 | * device_reset - find reset controller associated with the device | 267 | * device_reset - find reset controller associated with the device |
275 | * and perform reset | 268 | * and perform reset |
diff --git a/drivers/reset/sti/Kconfig b/drivers/reset/sti/Kconfig new file mode 100644 index 000000000000..88d2d0316613 --- /dev/null +++ b/drivers/reset/sti/Kconfig | |||
@@ -0,0 +1,15 @@ | |||
1 | if ARCH_STI | ||
2 | |||
3 | config STI_RESET_SYSCFG | ||
4 | bool | ||
5 | select RESET_CONTROLLER | ||
6 | |||
7 | config STIH415_RESET | ||
8 | bool | ||
9 | select STI_RESET_SYSCFG | ||
10 | |||
11 | config STIH416_RESET | ||
12 | bool | ||
13 | select STI_RESET_SYSCFG | ||
14 | |||
15 | endif | ||
diff --git a/drivers/reset/sti/Makefile b/drivers/reset/sti/Makefile new file mode 100644 index 000000000000..be1c97647871 --- /dev/null +++ b/drivers/reset/sti/Makefile | |||
@@ -0,0 +1,4 @@ | |||
1 | obj-$(CONFIG_STI_RESET_SYSCFG) += reset-syscfg.o | ||
2 | |||
3 | obj-$(CONFIG_STIH415_RESET) += reset-stih415.o | ||
4 | obj-$(CONFIG_STIH416_RESET) += reset-stih416.o | ||
diff --git a/drivers/reset/sti/reset-stih415.c b/drivers/reset/sti/reset-stih415.c new file mode 100644 index 000000000000..e6f6c41abe12 --- /dev/null +++ b/drivers/reset/sti/reset-stih415.c | |||
@@ -0,0 +1,112 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 STMicroelectronics (R&D) Limited | ||
3 | * Author: Stephen Gallimore <stephen.gallimore@st.com> | ||
4 | * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/of.h> | ||
13 | #include <linux/of_platform.h> | ||
14 | #include <linux/platform_device.h> | ||
15 | |||
16 | #include <dt-bindings/reset-controller/stih415-resets.h> | ||
17 | |||
18 | #include "reset-syscfg.h" | ||
19 | |||
20 | /* | ||
21 | * STiH415 Peripheral powerdown definitions. | ||
22 | */ | ||
23 | static const char stih415_front[] = "st,stih415-front-syscfg"; | ||
24 | static const char stih415_rear[] = "st,stih415-rear-syscfg"; | ||
25 | static const char stih415_sbc[] = "st,stih415-sbc-syscfg"; | ||
26 | static const char stih415_lpm[] = "st,stih415-lpm-syscfg"; | ||
27 | |||
28 | #define STIH415_PDN_FRONT(_bit) \ | ||
29 | _SYSCFG_RST_CH(stih415_front, SYSCFG_114, _bit, SYSSTAT_187, _bit) | ||
30 | |||
31 | #define STIH415_PDN_REAR(_cntl, _stat) \ | ||
32 | _SYSCFG_RST_CH(stih415_rear, SYSCFG_336, _cntl, SYSSTAT_384, _stat) | ||
33 | |||
34 | #define STIH415_SRST_REAR(_reg, _bit) \ | ||
35 | _SYSCFG_RST_CH_NO_ACK(stih415_rear, _reg, _bit) | ||
36 | |||
37 | #define STIH415_SRST_SBC(_reg, _bit) \ | ||
38 | _SYSCFG_RST_CH_NO_ACK(stih415_sbc, _reg, _bit) | ||
39 | |||
40 | #define STIH415_SRST_FRONT(_reg, _bit) \ | ||
41 | _SYSCFG_RST_CH_NO_ACK(stih415_front, _reg, _bit) | ||
42 | |||
43 | #define STIH415_SRST_LPM(_reg, _bit) \ | ||
44 | _SYSCFG_RST_CH_NO_ACK(stih415_lpm, _reg, _bit) | ||
45 | |||
46 | #define SYSCFG_114 0x38 /* Powerdown request EMI/NAND/Keyscan */ | ||
47 | #define SYSSTAT_187 0x15c /* Powerdown status EMI/NAND/Keyscan */ | ||
48 | |||
49 | #define SYSCFG_336 0x90 /* Powerdown request USB/SATA/PCIe */ | ||
50 | #define SYSSTAT_384 0x150 /* Powerdown status USB/SATA/PCIe */ | ||
51 | |||
52 | #define SYSCFG_376 0x130 /* Reset generator 0 control 0 */ | ||
53 | #define SYSCFG_166 0x108 /* Softreset Ethernet 0 */ | ||
54 | #define SYSCFG_31 0x7c /* Softreset Ethernet 1 */ | ||
55 | #define LPM_SYSCFG_1 0x4 /* Softreset IRB */ | ||
56 | |||
57 | static const struct syscfg_reset_channel_data stih415_powerdowns[] = { | ||
58 | [STIH415_EMISS_POWERDOWN] = STIH415_PDN_FRONT(0), | ||
59 | [STIH415_NAND_POWERDOWN] = STIH415_PDN_FRONT(1), | ||
60 | [STIH415_KEYSCAN_POWERDOWN] = STIH415_PDN_FRONT(2), | ||
61 | [STIH415_USB0_POWERDOWN] = STIH415_PDN_REAR(0, 0), | ||
62 | [STIH415_USB1_POWERDOWN] = STIH415_PDN_REAR(1, 1), | ||
63 | [STIH415_USB2_POWERDOWN] = STIH415_PDN_REAR(2, 2), | ||
64 | [STIH415_SATA0_POWERDOWN] = STIH415_PDN_REAR(3, 3), | ||
65 | [STIH415_SATA1_POWERDOWN] = STIH415_PDN_REAR(4, 4), | ||
66 | [STIH415_PCIE_POWERDOWN] = STIH415_PDN_REAR(5, 8), | ||
67 | }; | ||
68 | |||
69 | static const struct syscfg_reset_channel_data stih415_softresets[] = { | ||
70 | [STIH415_ETH0_SOFTRESET] = STIH415_SRST_FRONT(SYSCFG_166, 0), | ||
71 | [STIH415_ETH1_SOFTRESET] = STIH415_SRST_SBC(SYSCFG_31, 0), | ||
72 | [STIH415_IRB_SOFTRESET] = STIH415_SRST_LPM(LPM_SYSCFG_1, 6), | ||
73 | [STIH415_USB0_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 9), | ||
74 | [STIH415_USB1_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 10), | ||
75 | [STIH415_USB2_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 11), | ||
76 | }; | ||
77 | |||
78 | static struct syscfg_reset_controller_data stih415_powerdown_controller = { | ||
79 | .wait_for_ack = true, | ||
80 | .nr_channels = ARRAY_SIZE(stih415_powerdowns), | ||
81 | .channels = stih415_powerdowns, | ||
82 | }; | ||
83 | |||
84 | static struct syscfg_reset_controller_data stih415_softreset_controller = { | ||
85 | .wait_for_ack = false, | ||
86 | .active_low = true, | ||
87 | .nr_channels = ARRAY_SIZE(stih415_softresets), | ||
88 | .channels = stih415_softresets, | ||
89 | }; | ||
90 | |||
91 | static struct of_device_id stih415_reset_match[] = { | ||
92 | { .compatible = "st,stih415-powerdown", | ||
93 | .data = &stih415_powerdown_controller, }, | ||
94 | { .compatible = "st,stih415-softreset", | ||
95 | .data = &stih415_softreset_controller, }, | ||
96 | {}, | ||
97 | }; | ||
98 | |||
99 | static struct platform_driver stih415_reset_driver = { | ||
100 | .probe = syscfg_reset_probe, | ||
101 | .driver = { | ||
102 | .name = "reset-stih415", | ||
103 | .owner = THIS_MODULE, | ||
104 | .of_match_table = stih415_reset_match, | ||
105 | }, | ||
106 | }; | ||
107 | |||
108 | static int __init stih415_reset_init(void) | ||
109 | { | ||
110 | return platform_driver_register(&stih415_reset_driver); | ||
111 | } | ||
112 | arch_initcall(stih415_reset_init); | ||
diff --git a/drivers/reset/sti/reset-stih416.c b/drivers/reset/sti/reset-stih416.c new file mode 100644 index 000000000000..fe3bf02bdc8c --- /dev/null +++ b/drivers/reset/sti/reset-stih416.c | |||
@@ -0,0 +1,143 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 STMicroelectronics (R&D) Limited | ||
3 | * Author: Stephen Gallimore <stephen.gallimore@st.com> | ||
4 | * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/of.h> | ||
13 | #include <linux/of_platform.h> | ||
14 | #include <linux/platform_device.h> | ||
15 | |||
16 | #include <dt-bindings/reset-controller/stih416-resets.h> | ||
17 | |||
18 | #include "reset-syscfg.h" | ||
19 | |||
20 | /* | ||
21 | * STiH416 Peripheral powerdown definitions. | ||
22 | */ | ||
23 | static const char stih416_front[] = "st,stih416-front-syscfg"; | ||
24 | static const char stih416_rear[] = "st,stih416-rear-syscfg"; | ||
25 | static const char stih416_sbc[] = "st,stih416-sbc-syscfg"; | ||
26 | static const char stih416_lpm[] = "st,stih416-lpm-syscfg"; | ||
27 | static const char stih416_cpu[] = "st,stih416-cpu-syscfg"; | ||
28 | |||
29 | #define STIH416_PDN_FRONT(_bit) \ | ||
30 | _SYSCFG_RST_CH(stih416_front, SYSCFG_1500, _bit, SYSSTAT_1578, _bit) | ||
31 | |||
32 | #define STIH416_PDN_REAR(_cntl, _stat) \ | ||
33 | _SYSCFG_RST_CH(stih416_rear, SYSCFG_2525, _cntl, SYSSTAT_2583, _stat) | ||
34 | |||
35 | #define SYSCFG_1500 0x7d0 /* Powerdown request EMI/NAND/Keyscan */ | ||
36 | #define SYSSTAT_1578 0x908 /* Powerdown status EMI/NAND/Keyscan */ | ||
37 | |||
38 | #define SYSCFG_2525 0x834 /* Powerdown request USB/SATA/PCIe */ | ||
39 | #define SYSSTAT_2583 0x91c /* Powerdown status USB/SATA/PCIe */ | ||
40 | |||
41 | #define SYSCFG_2552 0x8A0 /* Reset Generator control 0 */ | ||
42 | #define SYSCFG_1539 0x86c /* Softreset Ethernet 0 */ | ||
43 | #define SYSCFG_510 0x7f8 /* Softreset Ethernet 1 */ | ||
44 | #define LPM_SYSCFG_1 0x4 /* Softreset IRB */ | ||
45 | #define SYSCFG_2553 0x8a4 /* Softreset SATA0/1, PCIE0/1 */ | ||
46 | #define SYSCFG_7563 0x8cc /* MPE softresets 0 */ | ||
47 | #define SYSCFG_7564 0x8d0 /* MPE softresets 1 */ | ||
48 | |||
49 | #define STIH416_SRST_CPU(_reg, _bit) \ | ||
50 | _SYSCFG_RST_CH_NO_ACK(stih416_cpu, _reg, _bit) | ||
51 | |||
52 | #define STIH416_SRST_FRONT(_reg, _bit) \ | ||
53 | _SYSCFG_RST_CH_NO_ACK(stih416_front, _reg, _bit) | ||
54 | |||
55 | #define STIH416_SRST_REAR(_reg, _bit) \ | ||
56 | _SYSCFG_RST_CH_NO_ACK(stih416_rear, _reg, _bit) | ||
57 | |||
58 | #define STIH416_SRST_LPM(_reg, _bit) \ | ||
59 | _SYSCFG_RST_CH_NO_ACK(stih416_lpm, _reg, _bit) | ||
60 | |||
61 | #define STIH416_SRST_SBC(_reg, _bit) \ | ||
62 | _SYSCFG_RST_CH_NO_ACK(stih416_sbc, _reg, _bit) | ||
63 | |||
64 | static const struct syscfg_reset_channel_data stih416_powerdowns[] = { | ||
65 | [STIH416_EMISS_POWERDOWN] = STIH416_PDN_FRONT(0), | ||
66 | [STIH416_NAND_POWERDOWN] = STIH416_PDN_FRONT(1), | ||
67 | [STIH416_KEYSCAN_POWERDOWN] = STIH416_PDN_FRONT(2), | ||
68 | [STIH416_USB0_POWERDOWN] = STIH416_PDN_REAR(0, 0), | ||
69 | [STIH416_USB1_POWERDOWN] = STIH416_PDN_REAR(1, 1), | ||
70 | [STIH416_USB2_POWERDOWN] = STIH416_PDN_REAR(2, 2), | ||
71 | [STIH416_USB3_POWERDOWN] = STIH416_PDN_REAR(6, 5), | ||
72 | [STIH416_SATA0_POWERDOWN] = STIH416_PDN_REAR(3, 3), | ||
73 | [STIH416_SATA1_POWERDOWN] = STIH416_PDN_REAR(4, 4), | ||
74 | [STIH416_PCIE0_POWERDOWN] = STIH416_PDN_REAR(7, 9), | ||
75 | [STIH416_PCIE1_POWERDOWN] = STIH416_PDN_REAR(5, 8), | ||
76 | }; | ||
77 | |||
78 | static const struct syscfg_reset_channel_data stih416_softresets[] = { | ||
79 | [STIH416_ETH0_SOFTRESET] = STIH416_SRST_FRONT(SYSCFG_1539, 0), | ||
80 | [STIH416_ETH1_SOFTRESET] = STIH416_SRST_SBC(SYSCFG_510, 0), | ||
81 | [STIH416_IRB_SOFTRESET] = STIH416_SRST_LPM(LPM_SYSCFG_1, 6), | ||
82 | [STIH416_USB0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 9), | ||
83 | [STIH416_USB1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 10), | ||
84 | [STIH416_USB2_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 11), | ||
85 | [STIH416_USB3_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 28), | ||
86 | [STIH416_SATA0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 7), | ||
87 | [STIH416_SATA1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 3), | ||
88 | [STIH416_PCIE0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 15), | ||
89 | [STIH416_PCIE1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 2), | ||
90 | [STIH416_AUD_DAC_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 14), | ||
91 | [STIH416_HDTVOUT_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 5), | ||
92 | [STIH416_VTAC_M_RX_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 25), | ||
93 | [STIH416_VTAC_A_RX_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 26), | ||
94 | [STIH416_SYNC_HD_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 5), | ||
95 | [STIH416_SYNC_SD_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 6), | ||
96 | [STIH416_BLITTER_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 10), | ||
97 | [STIH416_GPU_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 11), | ||
98 | [STIH416_VTAC_M_TX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 18), | ||
99 | [STIH416_VTAC_A_TX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 19), | ||
100 | [STIH416_VTG_AUX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 21), | ||
101 | [STIH416_JPEG_DEC_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 23), | ||
102 | [STIH416_HVA_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 2), | ||
103 | [STIH416_COMPO_M_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 3), | ||
104 | [STIH416_COMPO_A_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 4), | ||
105 | [STIH416_VP8_DEC_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 10), | ||
106 | [STIH416_VTG_MAIN_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 16), | ||
107 | }; | ||
108 | |||
109 | static struct syscfg_reset_controller_data stih416_powerdown_controller = { | ||
110 | .wait_for_ack = true, | ||
111 | .nr_channels = ARRAY_SIZE(stih416_powerdowns), | ||
112 | .channels = stih416_powerdowns, | ||
113 | }; | ||
114 | |||
115 | static struct syscfg_reset_controller_data stih416_softreset_controller = { | ||
116 | .wait_for_ack = false, | ||
117 | .active_low = true, | ||
118 | .nr_channels = ARRAY_SIZE(stih416_softresets), | ||
119 | .channels = stih416_softresets, | ||
120 | }; | ||
121 | |||
122 | static struct of_device_id stih416_reset_match[] = { | ||
123 | { .compatible = "st,stih416-powerdown", | ||
124 | .data = &stih416_powerdown_controller, }, | ||
125 | { .compatible = "st,stih416-softreset", | ||
126 | .data = &stih416_softreset_controller, }, | ||
127 | {}, | ||
128 | }; | ||
129 | |||
130 | static struct platform_driver stih416_reset_driver = { | ||
131 | .probe = syscfg_reset_probe, | ||
132 | .driver = { | ||
133 | .name = "reset-stih416", | ||
134 | .owner = THIS_MODULE, | ||
135 | .of_match_table = stih416_reset_match, | ||
136 | }, | ||
137 | }; | ||
138 | |||
139 | static int __init stih416_reset_init(void) | ||
140 | { | ||
141 | return platform_driver_register(&stih416_reset_driver); | ||
142 | } | ||
143 | arch_initcall(stih416_reset_init); | ||
diff --git a/drivers/reset/sti/reset-syscfg.c b/drivers/reset/sti/reset-syscfg.c new file mode 100644 index 000000000000..a145cc066d4a --- /dev/null +++ b/drivers/reset/sti/reset-syscfg.c | |||
@@ -0,0 +1,186 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 STMicroelectronics Limited | ||
3 | * Author: Stephen Gallimore <stephen.gallimore@st.com> | ||
4 | * | ||
5 | * Inspired by mach-imx/src.c | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | */ | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/platform_device.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/err.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/of_device.h> | ||
18 | #include <linux/regmap.h> | ||
19 | #include <linux/mfd/syscon.h> | ||
20 | |||
21 | #include "reset-syscfg.h" | ||
22 | |||
23 | /** | ||
24 | * Reset channel regmap configuration | ||
25 | * | ||
26 | * @reset: regmap field for the channel's reset bit. | ||
27 | * @ack: regmap field for the channel's ack bit (optional). | ||
28 | */ | ||
29 | struct syscfg_reset_channel { | ||
30 | struct regmap_field *reset; | ||
31 | struct regmap_field *ack; | ||
32 | }; | ||
33 | |||
34 | /** | ||
35 | * A reset controller which groups together a set of related reset bits, which | ||
36 | * may be located in different system configuration registers. | ||
37 | * | ||
38 | * @rst: base reset controller structure. | ||
39 | * @active_low: are the resets in this controller active low, i.e. clearing | ||
40 | * the reset bit puts the hardware into reset. | ||
41 | * @channels: An array of reset channels for this controller. | ||
42 | */ | ||
43 | struct syscfg_reset_controller { | ||
44 | struct reset_controller_dev rst; | ||
45 | bool active_low; | ||
46 | struct syscfg_reset_channel *channels; | ||
47 | }; | ||
48 | |||
49 | #define to_syscfg_reset_controller(_rst) \ | ||
50 | container_of(_rst, struct syscfg_reset_controller, rst) | ||
51 | |||
52 | static int syscfg_reset_program_hw(struct reset_controller_dev *rcdev, | ||
53 | unsigned long idx, int assert) | ||
54 | { | ||
55 | struct syscfg_reset_controller *rst = to_syscfg_reset_controller(rcdev); | ||
56 | const struct syscfg_reset_channel *ch; | ||
57 | u32 ctrl_val = rst->active_low ? !assert : !!assert; | ||
58 | int err; | ||
59 | |||
60 | if (idx >= rcdev->nr_resets) | ||
61 | return -EINVAL; | ||
62 | |||
63 | ch = &rst->channels[idx]; | ||
64 | |||
65 | err = regmap_field_write(ch->reset, ctrl_val); | ||
66 | if (err) | ||
67 | return err; | ||
68 | |||
69 | if (ch->ack) { | ||
70 | unsigned long timeout = jiffies + msecs_to_jiffies(1000); | ||
71 | u32 ack_val; | ||
72 | |||
73 | while (true) { | ||
74 | err = regmap_field_read(ch->ack, &ack_val); | ||
75 | if (err) | ||
76 | return err; | ||
77 | |||
78 | if (ack_val == ctrl_val) | ||
79 | break; | ||
80 | |||
81 | if (time_after(jiffies, timeout)) | ||
82 | return -ETIME; | ||
83 | |||
84 | cpu_relax(); | ||
85 | } | ||
86 | } | ||
87 | |||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | static int syscfg_reset_assert(struct reset_controller_dev *rcdev, | ||
92 | unsigned long idx) | ||
93 | { | ||
94 | return syscfg_reset_program_hw(rcdev, idx, true); | ||
95 | } | ||
96 | |||
97 | static int syscfg_reset_deassert(struct reset_controller_dev *rcdev, | ||
98 | unsigned long idx) | ||
99 | { | ||
100 | return syscfg_reset_program_hw(rcdev, idx, false); | ||
101 | } | ||
102 | |||
103 | static int syscfg_reset_dev(struct reset_controller_dev *rcdev, | ||
104 | unsigned long idx) | ||
105 | { | ||
106 | int err = syscfg_reset_assert(rcdev, idx); | ||
107 | if (err) | ||
108 | return err; | ||
109 | |||
110 | return syscfg_reset_deassert(rcdev, idx); | ||
111 | } | ||
112 | |||
113 | static struct reset_control_ops syscfg_reset_ops = { | ||
114 | .reset = syscfg_reset_dev, | ||
115 | .assert = syscfg_reset_assert, | ||
116 | .deassert = syscfg_reset_deassert, | ||
117 | }; | ||
118 | |||
119 | static int syscfg_reset_controller_register(struct device *dev, | ||
120 | const struct syscfg_reset_controller_data *data) | ||
121 | { | ||
122 | struct syscfg_reset_controller *rc; | ||
123 | size_t size; | ||
124 | int i, err; | ||
125 | |||
126 | rc = devm_kzalloc(dev, sizeof(*rc), GFP_KERNEL); | ||
127 | if (!rc) | ||
128 | return -ENOMEM; | ||
129 | |||
130 | size = sizeof(struct syscfg_reset_channel) * data->nr_channels; | ||
131 | |||
132 | rc->channels = devm_kzalloc(dev, size, GFP_KERNEL); | ||
133 | if (!rc->channels) | ||
134 | return -ENOMEM; | ||
135 | |||
136 | rc->rst.ops = &syscfg_reset_ops, | ||
137 | rc->rst.of_node = dev->of_node; | ||
138 | rc->rst.nr_resets = data->nr_channels; | ||
139 | rc->active_low = data->active_low; | ||
140 | |||
141 | for (i = 0; i < data->nr_channels; i++) { | ||
142 | struct regmap *map; | ||
143 | struct regmap_field *f; | ||
144 | const char *compatible = data->channels[i].compatible; | ||
145 | |||
146 | map = syscon_regmap_lookup_by_compatible(compatible); | ||
147 | if (IS_ERR(map)) | ||
148 | return PTR_ERR(map); | ||
149 | |||
150 | f = devm_regmap_field_alloc(dev, map, data->channels[i].reset); | ||
151 | if (IS_ERR(f)) | ||
152 | return PTR_ERR(f); | ||
153 | |||
154 | rc->channels[i].reset = f; | ||
155 | |||
156 | if (!data->wait_for_ack) | ||
157 | continue; | ||
158 | |||
159 | f = devm_regmap_field_alloc(dev, map, data->channels[i].ack); | ||
160 | if (IS_ERR(f)) | ||
161 | return PTR_ERR(f); | ||
162 | |||
163 | rc->channels[i].ack = f; | ||
164 | } | ||
165 | |||
166 | err = reset_controller_register(&rc->rst); | ||
167 | if (!err) | ||
168 | dev_info(dev, "registered\n"); | ||
169 | |||
170 | return err; | ||
171 | } | ||
172 | |||
173 | int syscfg_reset_probe(struct platform_device *pdev) | ||
174 | { | ||
175 | struct device *dev = pdev ? &pdev->dev : NULL; | ||
176 | const struct of_device_id *match; | ||
177 | |||
178 | if (!dev || !dev->driver) | ||
179 | return -ENODEV; | ||
180 | |||
181 | match = of_match_device(dev->driver->of_match_table, dev); | ||
182 | if (!match || !match->data) | ||
183 | return -EINVAL; | ||
184 | |||
185 | return syscfg_reset_controller_register(dev, match->data); | ||
186 | } | ||
diff --git a/drivers/reset/sti/reset-syscfg.h b/drivers/reset/sti/reset-syscfg.h new file mode 100644 index 000000000000..2cc2283bac40 --- /dev/null +++ b/drivers/reset/sti/reset-syscfg.h | |||
@@ -0,0 +1,69 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 STMicroelectronics (R&D) Limited | ||
3 | * Author: Stephen Gallimore <stephen.gallimore@st.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | */ | ||
10 | #ifndef __STI_RESET_SYSCFG_H | ||
11 | #define __STI_RESET_SYSCFG_H | ||
12 | |||
13 | #include <linux/device.h> | ||
14 | #include <linux/regmap.h> | ||
15 | #include <linux/reset-controller.h> | ||
16 | |||
17 | /** | ||
18 | * Reset channel description for a system configuration register based | ||
19 | * reset controller. | ||
20 | * | ||
21 | * @compatible: Compatible string of the syscon regmap containing this | ||
22 | * channel's control and ack (status) bits. | ||
23 | * @reset: Regmap field description of the channel's reset bit. | ||
24 | * @ack: Regmap field description of the channel's acknowledge bit. | ||
25 | */ | ||
26 | struct syscfg_reset_channel_data { | ||
27 | const char *compatible; | ||
28 | struct reg_field reset; | ||
29 | struct reg_field ack; | ||
30 | }; | ||
31 | |||
32 | #define _SYSCFG_RST_CH(_c, _rr, _rb, _ar, _ab) \ | ||
33 | { .compatible = _c, \ | ||
34 | .reset = REG_FIELD(_rr, _rb, _rb), \ | ||
35 | .ack = REG_FIELD(_ar, _ab, _ab), } | ||
36 | |||
37 | #define _SYSCFG_RST_CH_NO_ACK(_c, _rr, _rb) \ | ||
38 | { .compatible = _c, \ | ||
39 | .reset = REG_FIELD(_rr, _rb, _rb), } | ||
40 | |||
41 | /** | ||
42 | * Description of a system configuration register based reset controller. | ||
43 | * | ||
44 | * @wait_for_ack: The controller will wait for reset assert and de-assert to | ||
45 | * be "ack'd" in a channel's ack field. | ||
46 | * @active_low: Are the resets in this controller active low, i.e. clearing | ||
47 | * the reset bit puts the hardware into reset. | ||
48 | * @nr_channels: The number of reset channels in this controller. | ||
49 | * @channels: An array of reset channel descriptions. | ||
50 | */ | ||
51 | struct syscfg_reset_controller_data { | ||
52 | bool wait_for_ack; | ||
53 | bool active_low; | ||
54 | int nr_channels; | ||
55 | const struct syscfg_reset_channel_data *channels; | ||
56 | }; | ||
57 | |||
58 | /** | ||
59 | * syscfg_reset_probe(): platform device probe function used by syscfg | ||
60 | * reset controller drivers. This registers a reset | ||
61 | * controller configured by the OF match data for | ||
62 | * the compatible device which should be of type | ||
63 | * "struct syscfg_reset_controller_data". | ||
64 | * | ||
65 | * @pdev: platform device | ||
66 | */ | ||
67 | int syscfg_reset_probe(struct platform_device *pdev); | ||
68 | |||
69 | #endif /* __STI_RESET_SYSCFG_H */ | ||
diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c index 7e5ead936a04..41bd76aaff76 100644 --- a/drivers/rtc/rtc-isl12057.c +++ b/drivers/rtc/rtc-isl12057.c | |||
@@ -274,10 +274,7 @@ static int isl12057_probe(struct i2c_client *client, | |||
274 | dev_set_drvdata(dev, data); | 274 | dev_set_drvdata(dev, data); |
275 | 275 | ||
276 | rtc = devm_rtc_device_register(dev, DRV_NAME, &rtc_ops, THIS_MODULE); | 276 | rtc = devm_rtc_device_register(dev, DRV_NAME, &rtc_ops, THIS_MODULE); |
277 | if (IS_ERR(rtc)) | 277 | return PTR_ERR_OR_ZERO(rtc); |
278 | return PTR_ERR(rtc); | ||
279 | |||
280 | return 0; | ||
281 | } | 278 | } |
282 | 279 | ||
283 | #ifdef CONFIG_OF | 280 | #ifdef CONFIG_OF |
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c index d536c5962c99..d15a999363fc 100644 --- a/drivers/rtc/rtc-mv.c +++ b/drivers/rtc/rtc-mv.c | |||
@@ -222,6 +222,7 @@ static int __init mv_rtc_probe(struct platform_device *pdev) | |||
222 | struct resource *res; | 222 | struct resource *res; |
223 | struct rtc_plat_data *pdata; | 223 | struct rtc_plat_data *pdata; |
224 | u32 rtc_time; | 224 | u32 rtc_time; |
225 | u32 rtc_date; | ||
225 | int ret = 0; | 226 | int ret = 0; |
226 | 227 | ||
227 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | 228 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); |
@@ -257,6 +258,17 @@ static int __init mv_rtc_probe(struct platform_device *pdev) | |||
257 | } | 258 | } |
258 | } | 259 | } |
259 | 260 | ||
261 | /* | ||
262 | * A date after January 19th, 2038 does not fit on 32 bits and | ||
263 | * will confuse the kernel and userspace. Reset to a sane date | ||
264 | * (January 1st, 2013) if we're after 2038. | ||
265 | */ | ||
266 | rtc_date = readl(pdata->ioaddr + RTC_DATE_REG_OFFS); | ||
267 | if (bcd2bin((rtc_date >> RTC_YEAR_OFFS) & 0xff) >= 38) { | ||
268 | dev_info(&pdev->dev, "invalid RTC date, resetting to January 1st, 2013\n"); | ||
269 | writel(0x130101, pdata->ioaddr + RTC_DATE_REG_OFFS); | ||
270 | } | ||
271 | |||
260 | pdata->irq = platform_get_irq(pdev, 0); | 272 | pdata->irq = platform_get_irq(pdev, 0); |
261 | 273 | ||
262 | platform_set_drvdata(pdev, pdata); | 274 | platform_set_drvdata(pdev, pdata); |
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c index 1ebe67cd1833..7442bc130055 100644 --- a/drivers/sh/clk/cpg.c +++ b/drivers/sh/clk/cpg.c | |||
@@ -36,9 +36,47 @@ static void sh_clk_write(int value, struct clk *clk) | |||
36 | iowrite32(value, clk->mapped_reg); | 36 | iowrite32(value, clk->mapped_reg); |
37 | } | 37 | } |
38 | 38 | ||
39 | static unsigned int r8(const void __iomem *addr) | ||
40 | { | ||
41 | return ioread8(addr); | ||
42 | } | ||
43 | |||
44 | static unsigned int r16(const void __iomem *addr) | ||
45 | { | ||
46 | return ioread16(addr); | ||
47 | } | ||
48 | |||
49 | static unsigned int r32(const void __iomem *addr) | ||
50 | { | ||
51 | return ioread32(addr); | ||
52 | } | ||
53 | |||
39 | static int sh_clk_mstp_enable(struct clk *clk) | 54 | static int sh_clk_mstp_enable(struct clk *clk) |
40 | { | 55 | { |
41 | sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk); | 56 | sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk); |
57 | if (clk->status_reg) { | ||
58 | unsigned int (*read)(const void __iomem *addr); | ||
59 | int i; | ||
60 | void __iomem *mapped_status = (phys_addr_t)clk->status_reg - | ||
61 | (phys_addr_t)clk->enable_reg + clk->mapped_reg; | ||
62 | |||
63 | if (clk->flags & CLK_ENABLE_REG_8BIT) | ||
64 | read = r8; | ||
65 | else if (clk->flags & CLK_ENABLE_REG_16BIT) | ||
66 | read = r16; | ||
67 | else | ||
68 | read = r32; | ||
69 | |||
70 | for (i = 1000; | ||
71 | (read(mapped_status) & (1 << clk->enable_bit)) && i; | ||
72 | i--) | ||
73 | cpu_relax(); | ||
74 | if (!i) { | ||
75 | pr_err("cpg: failed to enable %p[%d]\n", | ||
76 | clk->enable_reg, clk->enable_bit); | ||
77 | return -ETIMEDOUT; | ||
78 | } | ||
79 | } | ||
42 | return 0; | 80 | return 0; |
43 | } | 81 | } |
44 | 82 | ||
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 5f88d767671e..2d51912a6e40 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig | |||
@@ -143,7 +143,7 @@ config RCAR_THERMAL | |||
143 | 143 | ||
144 | config KIRKWOOD_THERMAL | 144 | config KIRKWOOD_THERMAL |
145 | tristate "Temperature sensor on Marvell Kirkwood SoCs" | 145 | tristate "Temperature sensor on Marvell Kirkwood SoCs" |
146 | depends on ARCH_KIRKWOOD | 146 | depends on ARCH_KIRKWOOD || MACH_KIRKWOOD |
147 | depends on OF | 147 | depends on OF |
148 | help | 148 | help |
149 | Support for the Kirkwood thermal sensor driver into the Linux thermal | 149 | Support for the Kirkwood thermal sensor driver into the Linux thermal |
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 2577d67bacb2..2e6d8ddc4425 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig | |||
@@ -1024,7 +1024,7 @@ config SERIAL_SGI_IOC3 | |||
1024 | 1024 | ||
1025 | config SERIAL_MSM | 1025 | config SERIAL_MSM |
1026 | bool "MSM on-chip serial port support" | 1026 | bool "MSM on-chip serial port support" |
1027 | depends on ARCH_MSM | 1027 | depends on ARCH_MSM || ARCH_QCOM |
1028 | select SERIAL_CORE | 1028 | select SERIAL_CORE |
1029 | 1029 | ||
1030 | config SERIAL_MSM_CONSOLE | 1030 | config SERIAL_MSM_CONSOLE |
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 0c6048d5c9a3..74ec8fc5cc03 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -301,7 +301,7 @@ config DAVINCI_WATCHDOG | |||
301 | 301 | ||
302 | config ORION_WATCHDOG | 302 | config ORION_WATCHDOG |
303 | tristate "Orion watchdog" | 303 | tristate "Orion watchdog" |
304 | depends on ARCH_ORION5X || ARCH_KIRKWOOD || ARCH_DOVE || MACH_DOVE | 304 | depends on ARCH_ORION5X || ARCH_KIRKWOOD || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU |
305 | select WATCHDOG_CORE | 305 | select WATCHDOG_CORE |
306 | help | 306 | help |
307 | Say Y here if to include support for the watchdog timer | 307 | Say Y here if to include support for the watchdog timer |
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c index 498163497c1c..9b3c41d18703 100644 --- a/drivers/watchdog/orion_wdt.c +++ b/drivers/watchdog/orion_wdt.c | |||
@@ -18,101 +18,204 @@ | |||
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/watchdog.h> | 20 | #include <linux/watchdog.h> |
21 | #include <linux/interrupt.h> | ||
21 | #include <linux/io.h> | 22 | #include <linux/io.h> |
22 | #include <linux/spinlock.h> | ||
23 | #include <linux/clk.h> | 23 | #include <linux/clk.h> |
24 | #include <linux/err.h> | 24 | #include <linux/err.h> |
25 | #include <linux/of.h> | 25 | #include <linux/of.h> |
26 | #include <mach/bridge-regs.h> | 26 | #include <linux/of_device.h> |
27 | |||
28 | /* RSTOUT mask register physical address for Orion5x, Kirkwood and Dove */ | ||
29 | #define ORION_RSTOUT_MASK_OFFSET 0x20108 | ||
30 | |||
31 | /* Internal registers can be configured at any 1 MiB aligned address */ | ||
32 | #define INTERNAL_REGS_MASK ~(SZ_1M - 1) | ||
27 | 33 | ||
28 | /* | 34 | /* |
29 | * Watchdog timer block registers. | 35 | * Watchdog timer block registers. |
30 | */ | 36 | */ |
31 | #define TIMER_CTRL 0x0000 | 37 | #define TIMER_CTRL 0x0000 |
32 | #define WDT_EN 0x0010 | 38 | #define TIMER_A370_STATUS 0x04 |
33 | #define WDT_VAL 0x0024 | ||
34 | 39 | ||
35 | #define WDT_MAX_CYCLE_COUNT 0xffffffff | 40 | #define WDT_MAX_CYCLE_COUNT 0xffffffff |
36 | #define WDT_IN_USE 0 | ||
37 | #define WDT_OK_TO_CLOSE 1 | ||
38 | 41 | ||
39 | #define WDT_RESET_OUT_EN BIT(1) | 42 | #define WDT_A370_RATIO_MASK(v) ((v) << 16) |
40 | #define WDT_INT_REQ BIT(3) | 43 | #define WDT_A370_RATIO_SHIFT 5 |
44 | #define WDT_A370_RATIO (1 << WDT_A370_RATIO_SHIFT) | ||
45 | |||
46 | #define WDT_AXP_FIXED_ENABLE_BIT BIT(10) | ||
47 | #define WDT_A370_EXPIRED BIT(31) | ||
41 | 48 | ||
42 | static bool nowayout = WATCHDOG_NOWAYOUT; | 49 | static bool nowayout = WATCHDOG_NOWAYOUT; |
43 | static int heartbeat = -1; /* module parameter (seconds) */ | 50 | static int heartbeat = -1; /* module parameter (seconds) */ |
44 | static unsigned int wdt_max_duration; /* (seconds) */ | ||
45 | static struct clk *clk; | ||
46 | static unsigned int wdt_tclk; | ||
47 | static void __iomem *wdt_reg; | ||
48 | static DEFINE_SPINLOCK(wdt_lock); | ||
49 | 51 | ||
50 | static int orion_wdt_ping(struct watchdog_device *wdt_dev) | 52 | struct orion_watchdog; |
53 | |||
54 | struct orion_watchdog_data { | ||
55 | int wdt_counter_offset; | ||
56 | int wdt_enable_bit; | ||
57 | int rstout_enable_bit; | ||
58 | int (*clock_init)(struct platform_device *, | ||
59 | struct orion_watchdog *); | ||
60 | int (*start)(struct watchdog_device *); | ||
61 | }; | ||
62 | |||
63 | struct orion_watchdog { | ||
64 | struct watchdog_device wdt; | ||
65 | void __iomem *reg; | ||
66 | void __iomem *rstout; | ||
67 | unsigned long clk_rate; | ||
68 | struct clk *clk; | ||
69 | const struct orion_watchdog_data *data; | ||
70 | }; | ||
71 | |||
72 | static int orion_wdt_clock_init(struct platform_device *pdev, | ||
73 | struct orion_watchdog *dev) | ||
51 | { | 74 | { |
52 | spin_lock(&wdt_lock); | 75 | int ret; |
53 | 76 | ||
54 | /* Reload watchdog duration */ | 77 | dev->clk = clk_get(&pdev->dev, NULL); |
55 | writel(wdt_tclk * wdt_dev->timeout, wdt_reg + WDT_VAL); | 78 | if (IS_ERR(dev->clk)) |
79 | return PTR_ERR(dev->clk); | ||
80 | ret = clk_prepare_enable(dev->clk); | ||
81 | if (ret) { | ||
82 | clk_put(dev->clk); | ||
83 | return ret; | ||
84 | } | ||
56 | 85 | ||
57 | spin_unlock(&wdt_lock); | 86 | dev->clk_rate = clk_get_rate(dev->clk); |
58 | return 0; | 87 | return 0; |
59 | } | 88 | } |
60 | 89 | ||
61 | static int orion_wdt_start(struct watchdog_device *wdt_dev) | 90 | static int armada370_wdt_clock_init(struct platform_device *pdev, |
91 | struct orion_watchdog *dev) | ||
62 | { | 92 | { |
63 | u32 reg; | 93 | int ret; |
64 | 94 | ||
65 | spin_lock(&wdt_lock); | 95 | dev->clk = clk_get(&pdev->dev, NULL); |
96 | if (IS_ERR(dev->clk)) | ||
97 | return PTR_ERR(dev->clk); | ||
98 | ret = clk_prepare_enable(dev->clk); | ||
99 | if (ret) { | ||
100 | clk_put(dev->clk); | ||
101 | return ret; | ||
102 | } | ||
103 | |||
104 | /* Setup watchdog input clock */ | ||
105 | atomic_io_modify(dev->reg + TIMER_CTRL, | ||
106 | WDT_A370_RATIO_MASK(WDT_A370_RATIO_SHIFT), | ||
107 | WDT_A370_RATIO_MASK(WDT_A370_RATIO_SHIFT)); | ||
108 | |||
109 | dev->clk_rate = clk_get_rate(dev->clk) / WDT_A370_RATIO; | ||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | static int armadaxp_wdt_clock_init(struct platform_device *pdev, | ||
114 | struct orion_watchdog *dev) | ||
115 | { | ||
116 | int ret; | ||
117 | |||
118 | dev->clk = of_clk_get_by_name(pdev->dev.of_node, "fixed"); | ||
119 | if (IS_ERR(dev->clk)) | ||
120 | return PTR_ERR(dev->clk); | ||
121 | ret = clk_prepare_enable(dev->clk); | ||
122 | if (ret) { | ||
123 | clk_put(dev->clk); | ||
124 | return ret; | ||
125 | } | ||
126 | |||
127 | /* Enable the fixed watchdog clock input */ | ||
128 | atomic_io_modify(dev->reg + TIMER_CTRL, | ||
129 | WDT_AXP_FIXED_ENABLE_BIT, | ||
130 | WDT_AXP_FIXED_ENABLE_BIT); | ||
131 | |||
132 | dev->clk_rate = clk_get_rate(dev->clk); | ||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | static int orion_wdt_ping(struct watchdog_device *wdt_dev) | ||
137 | { | ||
138 | struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev); | ||
139 | /* Reload watchdog duration */ | ||
140 | writel(dev->clk_rate * wdt_dev->timeout, | ||
141 | dev->reg + dev->data->wdt_counter_offset); | ||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | static int armada370_start(struct watchdog_device *wdt_dev) | ||
146 | { | ||
147 | struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev); | ||
66 | 148 | ||
67 | /* Set watchdog duration */ | 149 | /* Set watchdog duration */ |
68 | writel(wdt_tclk * wdt_dev->timeout, wdt_reg + WDT_VAL); | 150 | writel(dev->clk_rate * wdt_dev->timeout, |
151 | dev->reg + dev->data->wdt_counter_offset); | ||
69 | 152 | ||
70 | /* Clear watchdog timer interrupt */ | 153 | /* Clear the watchdog expiration bit */ |
71 | writel(~WDT_INT_REQ, BRIDGE_CAUSE); | 154 | atomic_io_modify(dev->reg + TIMER_A370_STATUS, WDT_A370_EXPIRED, 0); |
72 | 155 | ||
73 | /* Enable watchdog timer */ | 156 | /* Enable watchdog timer */ |
74 | reg = readl(wdt_reg + TIMER_CTRL); | 157 | atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit, |
75 | reg |= WDT_EN; | 158 | dev->data->wdt_enable_bit); |
76 | writel(reg, wdt_reg + TIMER_CTRL); | 159 | |
160 | atomic_io_modify(dev->rstout, dev->data->rstout_enable_bit, | ||
161 | dev->data->rstout_enable_bit); | ||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | static int orion_start(struct watchdog_device *wdt_dev) | ||
166 | { | ||
167 | struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev); | ||
168 | |||
169 | /* Set watchdog duration */ | ||
170 | writel(dev->clk_rate * wdt_dev->timeout, | ||
171 | dev->reg + dev->data->wdt_counter_offset); | ||
172 | |||
173 | /* Enable watchdog timer */ | ||
174 | atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit, | ||
175 | dev->data->wdt_enable_bit); | ||
77 | 176 | ||
78 | /* Enable reset on watchdog */ | 177 | /* Enable reset on watchdog */ |
79 | reg = readl(RSTOUTn_MASK); | 178 | atomic_io_modify(dev->rstout, dev->data->rstout_enable_bit, |
80 | reg |= WDT_RESET_OUT_EN; | 179 | dev->data->rstout_enable_bit); |
81 | writel(reg, RSTOUTn_MASK); | ||
82 | 180 | ||
83 | spin_unlock(&wdt_lock); | ||
84 | return 0; | 181 | return 0; |
85 | } | 182 | } |
86 | 183 | ||
87 | static int orion_wdt_stop(struct watchdog_device *wdt_dev) | 184 | static int orion_wdt_start(struct watchdog_device *wdt_dev) |
88 | { | 185 | { |
89 | u32 reg; | 186 | struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev); |
90 | 187 | ||
91 | spin_lock(&wdt_lock); | 188 | /* There are some per-SoC quirks to handle */ |
189 | return dev->data->start(wdt_dev); | ||
190 | } | ||
191 | |||
192 | static int orion_wdt_stop(struct watchdog_device *wdt_dev) | ||
193 | { | ||
194 | struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev); | ||
92 | 195 | ||
93 | /* Disable reset on watchdog */ | 196 | /* Disable reset on watchdog */ |
94 | reg = readl(RSTOUTn_MASK); | 197 | atomic_io_modify(dev->rstout, dev->data->rstout_enable_bit, 0); |
95 | reg &= ~WDT_RESET_OUT_EN; | ||
96 | writel(reg, RSTOUTn_MASK); | ||
97 | 198 | ||
98 | /* Disable watchdog timer */ | 199 | /* Disable watchdog timer */ |
99 | reg = readl(wdt_reg + TIMER_CTRL); | 200 | atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit, 0); |
100 | reg &= ~WDT_EN; | ||
101 | writel(reg, wdt_reg + TIMER_CTRL); | ||
102 | 201 | ||
103 | spin_unlock(&wdt_lock); | ||
104 | return 0; | 202 | return 0; |
105 | } | 203 | } |
106 | 204 | ||
107 | static unsigned int orion_wdt_get_timeleft(struct watchdog_device *wdt_dev) | 205 | static int orion_wdt_enabled(struct orion_watchdog *dev) |
108 | { | 206 | { |
109 | unsigned int time_left; | 207 | bool enabled, running; |
208 | |||
209 | enabled = readl(dev->rstout) & dev->data->rstout_enable_bit; | ||
210 | running = readl(dev->reg + TIMER_CTRL) & dev->data->wdt_enable_bit; | ||
110 | 211 | ||
111 | spin_lock(&wdt_lock); | 212 | return enabled && running; |
112 | time_left = readl(wdt_reg + WDT_VAL) / wdt_tclk; | 213 | } |
113 | spin_unlock(&wdt_lock); | ||
114 | 214 | ||
115 | return time_left; | 215 | static unsigned int orion_wdt_get_timeleft(struct watchdog_device *wdt_dev) |
216 | { | ||
217 | struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev); | ||
218 | return readl(dev->reg + dev->data->wdt_counter_offset) / dev->clk_rate; | ||
116 | } | 219 | } |
117 | 220 | ||
118 | static int orion_wdt_set_timeout(struct watchdog_device *wdt_dev, | 221 | static int orion_wdt_set_timeout(struct watchdog_device *wdt_dev, |
@@ -136,68 +239,188 @@ static const struct watchdog_ops orion_wdt_ops = { | |||
136 | .get_timeleft = orion_wdt_get_timeleft, | 239 | .get_timeleft = orion_wdt_get_timeleft, |
137 | }; | 240 | }; |
138 | 241 | ||
139 | static struct watchdog_device orion_wdt = { | 242 | static irqreturn_t orion_wdt_irq(int irq, void *devid) |
140 | .info = &orion_wdt_info, | 243 | { |
141 | .ops = &orion_wdt_ops, | 244 | panic("Watchdog Timeout"); |
142 | .min_timeout = 1, | 245 | return IRQ_HANDLED; |
246 | } | ||
247 | |||
248 | /* | ||
249 | * The original devicetree binding for this driver specified only | ||
250 | * one memory resource, so in order to keep DT backwards compatibility | ||
251 | * we try to fallback to a hardcoded register address, if the resource | ||
252 | * is missing from the devicetree. | ||
253 | */ | ||
254 | static void __iomem *orion_wdt_ioremap_rstout(struct platform_device *pdev, | ||
255 | phys_addr_t internal_regs) | ||
256 | { | ||
257 | struct resource *res; | ||
258 | phys_addr_t rstout; | ||
259 | |||
260 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
261 | if (res) | ||
262 | return devm_ioremap(&pdev->dev, res->start, | ||
263 | resource_size(res)); | ||
264 | |||
265 | /* This workaround works only for "orion-wdt", DT-enabled */ | ||
266 | if (!of_device_is_compatible(pdev->dev.of_node, "marvell,orion-wdt")) | ||
267 | return NULL; | ||
268 | |||
269 | rstout = internal_regs + ORION_RSTOUT_MASK_OFFSET; | ||
270 | |||
271 | WARN(1, FW_BUG "falling back to harcoded RSTOUT reg %pa\n", &rstout); | ||
272 | return devm_ioremap(&pdev->dev, rstout, 0x4); | ||
273 | } | ||
274 | |||
275 | static const struct orion_watchdog_data orion_data = { | ||
276 | .rstout_enable_bit = BIT(1), | ||
277 | .wdt_enable_bit = BIT(4), | ||
278 | .wdt_counter_offset = 0x24, | ||
279 | .clock_init = orion_wdt_clock_init, | ||
280 | .start = orion_start, | ||
281 | }; | ||
282 | |||
283 | static const struct orion_watchdog_data armada370_data = { | ||
284 | .rstout_enable_bit = BIT(8), | ||
285 | .wdt_enable_bit = BIT(8), | ||
286 | .wdt_counter_offset = 0x34, | ||
287 | .clock_init = armada370_wdt_clock_init, | ||
288 | .start = armada370_start, | ||
143 | }; | 289 | }; |
144 | 290 | ||
291 | static const struct orion_watchdog_data armadaxp_data = { | ||
292 | .rstout_enable_bit = BIT(8), | ||
293 | .wdt_enable_bit = BIT(8), | ||
294 | .wdt_counter_offset = 0x34, | ||
295 | .clock_init = armadaxp_wdt_clock_init, | ||
296 | .start = armada370_start, | ||
297 | }; | ||
298 | |||
299 | static const struct of_device_id orion_wdt_of_match_table[] = { | ||
300 | { | ||
301 | .compatible = "marvell,orion-wdt", | ||
302 | .data = &orion_data, | ||
303 | }, | ||
304 | { | ||
305 | .compatible = "marvell,armada-370-wdt", | ||
306 | .data = &armada370_data, | ||
307 | }, | ||
308 | { | ||
309 | .compatible = "marvell,armada-xp-wdt", | ||
310 | .data = &armadaxp_data, | ||
311 | }, | ||
312 | {}, | ||
313 | }; | ||
314 | MODULE_DEVICE_TABLE(of, orion_wdt_of_match_table); | ||
315 | |||
145 | static int orion_wdt_probe(struct platform_device *pdev) | 316 | static int orion_wdt_probe(struct platform_device *pdev) |
146 | { | 317 | { |
318 | struct orion_watchdog *dev; | ||
319 | const struct of_device_id *match; | ||
320 | unsigned int wdt_max_duration; /* (seconds) */ | ||
147 | struct resource *res; | 321 | struct resource *res; |
148 | int ret; | 322 | int ret, irq; |
149 | 323 | ||
150 | clk = devm_clk_get(&pdev->dev, NULL); | 324 | dev = devm_kzalloc(&pdev->dev, sizeof(struct orion_watchdog), |
151 | if (IS_ERR(clk)) { | 325 | GFP_KERNEL); |
152 | dev_err(&pdev->dev, "Orion Watchdog missing clock\n"); | 326 | if (!dev) |
153 | return -ENODEV; | 327 | return -ENOMEM; |
154 | } | 328 | |
155 | clk_prepare_enable(clk); | 329 | match = of_match_device(orion_wdt_of_match_table, &pdev->dev); |
156 | wdt_tclk = clk_get_rate(clk); | 330 | if (!match) |
331 | /* Default legacy match */ | ||
332 | match = &orion_wdt_of_match_table[0]; | ||
333 | |||
334 | dev->wdt.info = &orion_wdt_info; | ||
335 | dev->wdt.ops = &orion_wdt_ops; | ||
336 | dev->wdt.min_timeout = 1; | ||
337 | dev->data = match->data; | ||
157 | 338 | ||
158 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 339 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
159 | if (!res) | 340 | if (!res) |
160 | return -ENODEV; | 341 | return -ENODEV; |
161 | wdt_reg = devm_ioremap(&pdev->dev, res->start, resource_size(res)); | ||
162 | if (!wdt_reg) | ||
163 | return -ENOMEM; | ||
164 | 342 | ||
165 | wdt_max_duration = WDT_MAX_CYCLE_COUNT / wdt_tclk; | 343 | dev->reg = devm_ioremap(&pdev->dev, res->start, |
344 | resource_size(res)); | ||
345 | if (!dev->reg) | ||
346 | return -ENOMEM; | ||
166 | 347 | ||
167 | orion_wdt.timeout = wdt_max_duration; | 348 | dev->rstout = orion_wdt_ioremap_rstout(pdev, res->start & |
168 | orion_wdt.max_timeout = wdt_max_duration; | 349 | INTERNAL_REGS_MASK); |
169 | watchdog_init_timeout(&orion_wdt, heartbeat, &pdev->dev); | 350 | if (!dev->rstout) |
351 | return -ENODEV; | ||
170 | 352 | ||
171 | watchdog_set_nowayout(&orion_wdt, nowayout); | 353 | ret = dev->data->clock_init(pdev, dev); |
172 | ret = watchdog_register_device(&orion_wdt); | ||
173 | if (ret) { | 354 | if (ret) { |
174 | clk_disable_unprepare(clk); | 355 | dev_err(&pdev->dev, "cannot initialize clock\n"); |
175 | return ret; | 356 | return ret; |
176 | } | 357 | } |
177 | 358 | ||
359 | wdt_max_duration = WDT_MAX_CYCLE_COUNT / dev->clk_rate; | ||
360 | |||
361 | dev->wdt.timeout = wdt_max_duration; | ||
362 | dev->wdt.max_timeout = wdt_max_duration; | ||
363 | watchdog_init_timeout(&dev->wdt, heartbeat, &pdev->dev); | ||
364 | |||
365 | platform_set_drvdata(pdev, &dev->wdt); | ||
366 | watchdog_set_drvdata(&dev->wdt, dev); | ||
367 | |||
368 | /* | ||
369 | * Let's make sure the watchdog is fully stopped, unless it's | ||
370 | * explicitly enabled. This may be the case if the module was | ||
371 | * removed and re-insterted, or if the bootloader explicitly | ||
372 | * set a running watchdog before booting the kernel. | ||
373 | */ | ||
374 | if (!orion_wdt_enabled(dev)) | ||
375 | orion_wdt_stop(&dev->wdt); | ||
376 | |||
377 | /* Request the IRQ only after the watchdog is disabled */ | ||
378 | irq = platform_get_irq(pdev, 0); | ||
379 | if (irq > 0) { | ||
380 | /* | ||
381 | * Not all supported platforms specify an interrupt for the | ||
382 | * watchdog, so let's make it optional. | ||
383 | */ | ||
384 | ret = devm_request_irq(&pdev->dev, irq, orion_wdt_irq, 0, | ||
385 | pdev->name, dev); | ||
386 | if (ret < 0) { | ||
387 | dev_err(&pdev->dev, "failed to request IRQ\n"); | ||
388 | goto disable_clk; | ||
389 | } | ||
390 | } | ||
391 | |||
392 | watchdog_set_nowayout(&dev->wdt, nowayout); | ||
393 | ret = watchdog_register_device(&dev->wdt); | ||
394 | if (ret) | ||
395 | goto disable_clk; | ||
396 | |||
178 | pr_info("Initial timeout %d sec%s\n", | 397 | pr_info("Initial timeout %d sec%s\n", |
179 | orion_wdt.timeout, nowayout ? ", nowayout" : ""); | 398 | dev->wdt.timeout, nowayout ? ", nowayout" : ""); |
180 | return 0; | 399 | return 0; |
400 | |||
401 | disable_clk: | ||
402 | clk_disable_unprepare(dev->clk); | ||
403 | clk_put(dev->clk); | ||
404 | return ret; | ||
181 | } | 405 | } |
182 | 406 | ||
183 | static int orion_wdt_remove(struct platform_device *pdev) | 407 | static int orion_wdt_remove(struct platform_device *pdev) |
184 | { | 408 | { |
185 | watchdog_unregister_device(&orion_wdt); | 409 | struct watchdog_device *wdt_dev = platform_get_drvdata(pdev); |
186 | clk_disable_unprepare(clk); | 410 | struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev); |
411 | |||
412 | watchdog_unregister_device(wdt_dev); | ||
413 | clk_disable_unprepare(dev->clk); | ||
414 | clk_put(dev->clk); | ||
187 | return 0; | 415 | return 0; |
188 | } | 416 | } |
189 | 417 | ||
190 | static void orion_wdt_shutdown(struct platform_device *pdev) | 418 | static void orion_wdt_shutdown(struct platform_device *pdev) |
191 | { | 419 | { |
192 | orion_wdt_stop(&orion_wdt); | 420 | struct watchdog_device *wdt_dev = platform_get_drvdata(pdev); |
421 | orion_wdt_stop(wdt_dev); | ||
193 | } | 422 | } |
194 | 423 | ||
195 | static const struct of_device_id orion_wdt_of_match_table[] = { | ||
196 | { .compatible = "marvell,orion-wdt", }, | ||
197 | {}, | ||
198 | }; | ||
199 | MODULE_DEVICE_TABLE(of, orion_wdt_of_match_table); | ||
200 | |||
201 | static struct platform_driver orion_wdt_driver = { | 424 | static struct platform_driver orion_wdt_driver = { |
202 | .probe = orion_wdt_probe, | 425 | .probe = orion_wdt_probe, |
203 | .remove = orion_wdt_remove, | 426 | .remove = orion_wdt_remove, |