Diffstat (limited to 'drivers')

 drivers/base/dma-buf.c                 |   25
 drivers/bus/arm-cci.c                  |   24
 drivers/char/hw_random/Kconfig         |    6
 drivers/clk/Kconfig                    |    1
 drivers/clk/Makefile                   |    1
 drivers/clk/bcm/Kconfig                |    9
 drivers/clk/bcm/Makefile               |    3
 drivers/clk/bcm/clk-bcm281xx.c         |  416
 drivers/clk/bcm/clk-kona-setup.c       |  769
 drivers/clk/bcm/clk-kona.c             | 1033
 drivers/clk/bcm/clk-kona.h             |  410
 drivers/clk/versatile/clk-icst.c       |   21
 drivers/clk/versatile/clk-icst.h       |    1
 drivers/clk/versatile/clk-impd1.c      |    6
 drivers/clk/versatile/clk-integrator.c |   83
 drivers/clk/versatile/clk-realview.c   |    4
 drivers/clocksource/Kconfig            |    3
 drivers/clocksource/Makefile           |    1
 drivers/clocksource/qcom-timer.c       |  330
 drivers/gpio/Kconfig                   |    2
 drivers/gpu/drm/msm/Kconfig            |    2
 drivers/irqchip/Kconfig                |    8
 drivers/irqchip/Makefile               |    1
 drivers/irqchip/irq-crossbar.c         |  208
 drivers/irqchip/irq-gic.c              |   82
 drivers/irqchip/irq-orion.c            |   22
 drivers/irqchip/irq-vic.c              |   60
 drivers/mtd/nand/davinci_nand.c        |   22
 drivers/power/reset/Kconfig            |    2
 drivers/sh/clk/cpg.c                   |   38
 drivers/tty/serial/Kconfig             |    2
 drivers/watchdog/Kconfig               |    2
 drivers/watchdog/orion_wdt.c           |  381
 33 files changed, 3768 insertions(+), 210 deletions(-)
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 1e16cbd61da2..61d6d62cc0d3 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -616,36 +616,35 @@ static int dma_buf_describe(struct seq_file *s)
 	if (ret)
 		return ret;
 
-	seq_printf(s, "\nDma-buf Objects:\n");
-	seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n");
+	seq_puts(s, "\nDma-buf Objects:\n");
+	seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");
 
 	list_for_each_entry(buf_obj, &db_list.head, list_node) {
 		ret = mutex_lock_interruptible(&buf_obj->lock);
 
 		if (ret) {
-			seq_printf(s,
+			seq_puts(s,
 				   "\tERROR locking buffer object: skipping\n");
 			continue;
 		}
 
-		seq_printf(s, "\t");
-
-		seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n",
-				buf_obj->exp_name, buf_obj->size,
+		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
+				buf_obj->size,
 				buf_obj->file->f_flags, buf_obj->file->f_mode,
-				(long)(buf_obj->file->f_count.counter));
+				(long)(buf_obj->file->f_count.counter),
+				buf_obj->exp_name);
 
-		seq_printf(s, "\t\tAttached Devices:\n");
+		seq_puts(s, "\tAttached Devices:\n");
 		attach_count = 0;
 
 		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
-			seq_printf(s, "\t\t");
+			seq_puts(s, "\t");
 
-			seq_printf(s, "%s\n", attach_obj->dev->init_name);
+			seq_printf(s, "%s\n", dev_name(attach_obj->dev));
 			attach_count++;
 		}
 
-		seq_printf(s, "\n\t\tTotal %d devices attached\n",
+		seq_printf(s, "Total %d devices attached\n\n",
 			   attach_count);
 
 		count++;
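The hunk above makes two changes: constant strings go through seq_puts(), which skips format-string parsing, and the variable-width exp_name field moves to the last column so the fixed-width numeric columns stay aligned. The same idiom in isolation, as a minimal sketch (demo_obj and its fields are hypothetical, not dma-buf types):

	#include <linux/seq_file.h>

	struct demo_obj {
		size_t size;
		unsigned int flags;
		const char *name;	/* variable width, so printed last */
	};

	static void demo_show(struct seq_file *s, const struct demo_obj *obj)
	{
		seq_puts(s, "size\tflags\tname\n");	/* constant: seq_puts() */
		seq_printf(s, "%08zu\t%08x\t%s\n",	/* formatted: seq_printf() */
			   obj->size, obj->flags, obj->name);
	}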
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 962fd35cbd8d..5a86da97a70b 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -31,7 +31,6 @@
 
 #define DRIVER_NAME		"CCI-400"
 #define DRIVER_NAME_PMU		DRIVER_NAME " PMU"
-#define PMU_NAME		"CCI_400"
 
 #define CCI_PORT_CTRL		0x0
 #define CCI_CTRL_STATUS		0xc
@@ -88,8 +87,7 @@ static unsigned long cci_ctrl_phys;
 
 #define CCI_REV_R0		0
 #define CCI_REV_R1		1
-#define CCI_REV_R0_P4		4
-#define CCI_REV_R1_P2		6
+#define CCI_REV_R1_PX		5
 
 #define CCI_PMU_EVT_SEL		0x000
 #define CCI_PMU_CNTR		0x004
@@ -163,6 +161,15 @@ static struct pmu_port_event_ranges port_event_range[] = {
 	},
 };
 
+/*
+ * Export a different PMU name for each revision, so userspace knows
+ * which set of event ids applies: they differ between revisions.
+ */
+static char *const pmu_names[] = {
+	[CCI_REV_R0] = "CCI_400",
+	[CCI_REV_R1] = "CCI_400_r1",
+};
+
 struct cci_pmu_drv_data {
 	void __iomem *base;
 	struct arm_pmu *cci_pmu;
@@ -193,21 +200,16 @@ static int probe_cci_revision(void)
 	rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
 	rev >>= CCI_PID2_REV_SHIFT;
 
-	if (rev <= CCI_REV_R0_P4)
+	if (rev < CCI_REV_R1_PX)
 		return CCI_REV_R0;
-	else if (rev <= CCI_REV_R1_P2)
+	else
 		return CCI_REV_R1;
-
-	return -ENOENT;
 }
 
 static struct pmu_port_event_ranges *port_range_by_rev(void)
 {
 	int rev = probe_cci_revision();
 
-	if (rev < 0)
-		return NULL;
-
 	return &port_event_range[rev];
 }
 
@@ -526,7 +528,7 @@ static void pmu_write_counter(struct perf_event *event, u32 value)
 static int cci_pmu_init(struct arm_pmu *cci_pmu, struct platform_device *pdev)
 {
 	*cci_pmu = (struct arm_pmu){
-		.name		  = PMU_NAME,
+		.name		  = pmu_names[probe_cci_revision()],
 		.max_period       = (1LLU << 32) - 1,
 		.get_hw_events	  = pmu_get_hw_events,
 		.get_event_idx	  = pmu_get_event_idx,
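With PMU_NAME gone, the PMU name is looked up from the probed hardware revision, and probe_cci_revision() can no longer fail, so port_range_by_rev() loses its error path. The lookup pattern in isolation, as a sketch with illustrative values (the cutoff 5 mirrors CCI_REV_R1_PX above):

	enum cci_rev { REV_R0, REV_R1 };

	static const char *const rev_names[] = {
		[REV_R0] = "CCI_400",
		[REV_R1] = "CCI_400_r1",
	};

	/* Raw revision fields below r1p0 all report as r0. */
	static enum cci_rev classify_rev(unsigned int raw)
	{
		return raw < 5 ? REV_R0 : REV_R1;
	}

	/* usage: pmu->name = rev_names[classify_rev(raw_rev)]; */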
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 2f2b08457c67..244759bbd7b7 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -342,11 +342,11 @@ config HW_RANDOM_TPM
 	  If unsure, say Y.
 
 config HW_RANDOM_MSM
-	tristate "Qualcomm MSM Random Number Generator support"
-	depends on HW_RANDOM && ARCH_MSM
+	tristate "Qualcomm SoCs Random Number Generator support"
+	depends on HW_RANDOM && ARCH_QCOM
 	---help---
 	  This driver provides kernel-side support for the Random Number
-	  Generator hardware found on Qualcomm MSM SoCs.
+	  Generator hardware found on Qualcomm SoCs.
 
 	  To compile this driver as a module, choose M here. the
 	  module will be called msm-rng.
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 7641965d208d..f9f605695e40 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -111,4 +111,5 @@ source "drivers/clk/qcom/Kconfig"
 
 endmenu
 
+source "drivers/clk/bcm/Kconfig"
 source "drivers/clk/mvebu/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index a367a9831717..88af4a399d6c 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
 obj-$(CONFIG_COMMON_CLK_WM831X)	+= clk-wm831x.o
 obj-$(CONFIG_COMMON_CLK_XGENE)	+= clk-xgene.o
 obj-$(CONFIG_COMMON_CLK_AT91)	+= at91/
+obj-$(CONFIG_ARCH_BCM_MOBILE)	+= bcm/
 obj-$(CONFIG_ARCH_HI3xxx)	+= hisilicon/
 obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/
 ifeq ($(CONFIG_COMMON_CLK), y)
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
new file mode 100644
index 000000000000..a7262fb8ce55
--- /dev/null
+++ b/drivers/clk/bcm/Kconfig
@@ -0,0 +1,9 @@
+config CLK_BCM_KONA
+	bool "Broadcom Kona CCU clock support"
+	depends on ARCH_BCM_MOBILE
+	depends on COMMON_CLK
+	default y
+	help
+	  Enable common clock framework support for Broadcom SoCs
+	  using "Kona" style clock control units, including those
+	  in the BCM281xx family.
diff --git a/drivers/clk/bcm/Makefile b/drivers/clk/bcm/Makefile
new file mode 100644
index 000000000000..cf93359aa862
--- /dev/null
+++ b/drivers/clk/bcm/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_CLK_BCM_KONA)	+= clk-kona.o
+obj-$(CONFIG_CLK_BCM_KONA)	+= clk-kona-setup.o
+obj-$(CONFIG_CLK_BCM_KONA)	+= clk-bcm281xx.o
diff --git a/drivers/clk/bcm/clk-bcm281xx.c b/drivers/clk/bcm/clk-bcm281xx.c
new file mode 100644
index 000000000000..3c66de696aeb
--- /dev/null
+++ b/drivers/clk/bcm/clk-bcm281xx.c
@@ -0,0 +1,416 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ * Copyright 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "clk-kona.h"
+#include "dt-bindings/clock/bcm281xx.h"
+
+/* bcm11351 CCU device tree "compatible" strings */
+#define BCM11351_DT_ROOT_CCU_COMPAT	"brcm,bcm11351-root-ccu"
+#define BCM11351_DT_AON_CCU_COMPAT	"brcm,bcm11351-aon-ccu"
+#define BCM11351_DT_HUB_CCU_COMPAT	"brcm,bcm11351-hub-ccu"
+#define BCM11351_DT_MASTER_CCU_COMPAT	"brcm,bcm11351-master-ccu"
+#define BCM11351_DT_SLAVE_CCU_COMPAT	"brcm,bcm11351-slave-ccu"
+
+/* Root CCU clocks */
+
+static struct peri_clk_data frac_1m_data = {
+	.gate		= HW_SW_GATE(0x214, 16, 0, 1),
+	.trig		= TRIGGER(0x0e04, 0),
+	.div		= FRAC_DIVIDER(0x0e00, 0, 22, 16),
+	.clocks		= CLOCKS("ref_crystal"),
+};
+
+/* AON CCU clocks */
+
+static struct peri_clk_data hub_timer_data = {
+	.gate		= HW_SW_GATE(0x0414, 16, 0, 1),
+	.clocks		= CLOCKS("bbl_32k",
+				 "frac_1m",
+				 "dft_19_5m"),
+	.sel		= SELECTOR(0x0a10, 0, 2),
+	.trig		= TRIGGER(0x0a40, 4),
+};
+
+static struct peri_clk_data pmu_bsc_data = {
+	.gate		= HW_SW_GATE(0x0418, 16, 0, 1),
+	.clocks		= CLOCKS("ref_crystal",
+				 "pmu_bsc_var",
+				 "bbl_32k"),
+	.sel		= SELECTOR(0x0a04, 0, 2),
+	.div		= DIVIDER(0x0a04, 3, 4),
+	.trig		= TRIGGER(0x0a40, 0),
+};
+
+static struct peri_clk_data pmu_bsc_var_data = {
+	.clocks		= CLOCKS("var_312m",
+				 "ref_312m"),
+	.sel		= SELECTOR(0x0a00, 0, 2),
+	.div		= DIVIDER(0x0a00, 4, 5),
+	.trig		= TRIGGER(0x0a40, 2),
+};
+
+/* Hub CCU clocks */
+
+static struct peri_clk_data tmon_1m_data = {
+	.gate		= HW_SW_GATE(0x04a4, 18, 2, 3),
+	.clocks		= CLOCKS("ref_crystal",
+				 "frac_1m"),
+	.sel		= SELECTOR(0x0e74, 0, 2),
+	.trig		= TRIGGER(0x0e84, 1),
+};
+
+/* Master CCU clocks */
+
+static struct peri_clk_data sdio1_data = {
+	.gate		= HW_SW_GATE(0x0358, 18, 2, 3),
+	.clocks		= CLOCKS("ref_crystal",
+				 "var_52m",
+				 "ref_52m",
+				 "var_96m",
+				 "ref_96m"),
+	.sel		= SELECTOR(0x0a28, 0, 3),
+	.div		= DIVIDER(0x0a28, 4, 14),
+	.trig		= TRIGGER(0x0afc, 9),
+};
+
+static struct peri_clk_data sdio2_data = {
+	.gate		= HW_SW_GATE(0x035c, 18, 2, 3),
+	.clocks		= CLOCKS("ref_crystal",
+				 "var_52m",
+				 "ref_52m",
+				 "var_96m",
+				 "ref_96m"),
+	.sel		= SELECTOR(0x0a2c, 0, 3),
+	.div		= DIVIDER(0x0a2c, 4, 14),
+	.trig		= TRIGGER(0x0afc, 10),
+};
+
+static struct peri_clk_data sdio3_data = {
+	.gate		= HW_SW_GATE(0x0364, 18, 2, 3),
+	.clocks		= CLOCKS("ref_crystal",
+				 "var_52m",
+				 "ref_52m",
+				 "var_96m",
+				 "ref_96m"),
+	.sel		= SELECTOR(0x0a34, 0, 3),
+	.div		= DIVIDER(0x0a34, 4, 14),
+	.trig		= TRIGGER(0x0afc, 12),
+};
+
+static struct peri_clk_data sdio4_data = {
+	.gate		= HW_SW_GATE(0x0360, 18, 2, 3),
+	.clocks		= CLOCKS("ref_crystal",
+				 "var_52m",
+				 "ref_52m",
+				 "var_96m",
+				 "ref_96m"),
+	.sel		= SELECTOR(0x0a30, 0, 3),
+	.div		= DIVIDER(0x0a30, 4, 14),
+	.trig		= TRIGGER(0x0afc, 11),
+};
+
+static struct peri_clk_data usb_ic_data = {
+	.gate		= HW_SW_GATE(0x0354, 18, 2, 3),
+	.clocks		= CLOCKS("ref_crystal",
+				 "var_96m",
+				 "ref_96m"),
+	.div		= FIXED_DIVIDER(2),
+	.sel		= SELECTOR(0x0a24, 0, 2),
+	.trig		= TRIGGER(0x0afc, 7),
+};
+
+/* also called usbh_48m */
+static struct peri_clk_data hsic2_48m_data = {
+	.gate		= HW_SW_GATE(0x0370, 18, 2, 3),
+	.clocks		= CLOCKS("ref_crystal",
+				 "var_96m",
+				 "ref_96m"),
+	.sel		= SELECTOR(0x0a38, 0, 2),
+	.div		= FIXED_DIVIDER(2),
+	.trig		= TRIGGER(0x0afc, 5),
+};
+
+/* also called usbh_12m */
+static struct peri_clk_data hsic2_12m_data = {
+	.gate		= HW_SW_GATE(0x0370, 20, 4, 5),
+	.div		= DIVIDER(0x0a38, 12, 2),
+	.clocks		= CLOCKS("ref_crystal",
+				 "var_96m",
+				 "ref_96m"),
+	.pre_div	= FIXED_DIVIDER(2),
+	.sel		= SELECTOR(0x0a38, 0, 2),
+	.trig		= TRIGGER(0x0afc, 5),
+};
+
+/* Slave CCU clocks */
+
+static struct peri_clk_data uartb_data = {
+	.gate		= HW_SW_GATE(0x0400, 18, 2, 3),
+	.clocks		= CLOCKS("ref_crystal",
+				 "var_156m",
+				 "ref_156m"),
+	.sel		= SELECTOR(0x0a10, 0, 2),
+	.div		= FRAC_DIVIDER(0x0a10, 4, 12, 8),
+	.trig		= TRIGGER(0x0afc, 2),
+};
+
+static struct peri_clk_data uartb2_data = {
+	.gate		= HW_SW_GATE(0x0404, 18, 2, 3),
+	.clocks		= CLOCKS("ref_crystal",
+				 "var_156m",
+				 "ref_156m"),
+	.sel		= SELECTOR(0x0a14, 0, 2),
+	.div		= FRAC_DIVIDER(0x0a14, 4, 12, 8),
+	.trig		= TRIGGER(0x0afc, 3),
+};
+
+static struct peri_clk_data uartb3_data = {
+	.gate		= HW_SW_GATE(0x0408, 18, 2, 3),
+	.clocks		= CLOCKS("ref_crystal",
+				 "var_156m",
+				 "ref_156m"),
+	.sel		= SELECTOR(0x0a18, 0, 2),
+	.div		= FRAC_DIVIDER(0x0a18, 4, 12, 8),
+	.trig		= TRIGGER(0x0afc, 4),
+};
+
+static struct peri_clk_data uartb4_data = {
+	.gate		= HW_SW_GATE(0x040c, 18, 2, 3),
+	.clocks		= CLOCKS("ref_crystal",
+				 "var_156m",
+				 "ref_156m"),
+	.sel		= SELECTOR(0x0a1c, 0, 2),
+	.div		= FRAC_DIVIDER(0x0a1c, 4, 12, 8),
+	.trig		= TRIGGER(0x0afc, 5),
+};
+
+static struct peri_clk_data ssp0_data = {
+	.gate		= HW_SW_GATE(0x0410, 18, 2, 3),
+	.clocks		= CLOCKS("ref_crystal",
+				 "var_104m",
+				 "ref_104m",
+				 "var_96m",
+				 "ref_96m"),
+	.sel		= SELECTOR(0x0a20, 0, 3),
+	.div		= DIVIDER(0x0a20, 4, 14),
+	.trig		= TRIGGER(0x0afc, 6),
+};
+
+static struct peri_clk_data ssp2_data = {
+	.gate		= HW_SW_GATE(0x0418, 18, 2, 3),
+	.clocks		= CLOCKS("ref_crystal",
+				 "var_104m",
+				 "ref_104m",
+				 "var_96m",
+				 "ref_96m"),
+	.sel		= SELECTOR(0x0a28, 0, 3),
+	.div		= DIVIDER(0x0a28, 4, 14),
+	.trig		= TRIGGER(0x0afc, 8),
+};
+
+static struct peri_clk_data bsc1_data = {
+	.gate		= HW_SW_GATE(0x0458, 18, 2, 3),
+	.clocks		= CLOCKS("ref_crystal",
+				 "var_104m",
+				 "ref_104m",
+				 "var_13m",
+				 "ref_13m"),
+	.sel		= SELECTOR(0x0a64, 0, 3),
+	.trig		= TRIGGER(0x0afc, 23),
+};
+
+static struct peri_clk_data bsc2_data = {
+	.gate		= HW_SW_GATE(0x045c, 18, 2, 3),
+	.clocks		= CLOCKS("ref_crystal",
+				 "var_104m",
+				 "ref_104m",
+				 "var_13m",
+				 "ref_13m"),
+	.sel		= SELECTOR(0x0a68, 0, 3),
+	.trig		= TRIGGER(0x0afc, 24),
+};
+
+static struct peri_clk_data bsc3_data = {
+	.gate		= HW_SW_GATE(0x0484, 18, 2, 3),
+	.clocks		= CLOCKS("ref_crystal",
+				 "var_104m",
+				 "ref_104m",
+				 "var_13m",
+				 "ref_13m"),
+	.sel		= SELECTOR(0x0a84, 0, 3),
+	.trig		= TRIGGER(0x0b00, 2),
+};
+
+static struct peri_clk_data pwm_data = {
+	.gate		= HW_SW_GATE(0x0468, 18, 2, 3),
+	.clocks		= CLOCKS("ref_crystal",
+				 "var_104m"),
+	.sel		= SELECTOR(0x0a70, 0, 2),
+	.div		= DIVIDER(0x0a70, 4, 3),
+	.trig		= TRIGGER(0x0afc, 15),
+};
+
+/*
+ * CCU setup routines
+ *
+ * These are called from kona_dt_ccu_setup() to initialize the array
+ * of clocks provided by the CCU. Once allocated, the entries in
+ * the array are initialized by calling kona_clk_setup() with the
+ * initialization data for each clock. They return 0 if successful
+ * or an error code otherwise.
+ */
+static int __init bcm281xx_root_ccu_clks_setup(struct ccu_data *ccu)
+{
+	struct clk **clks;
+	size_t count = BCM281XX_ROOT_CCU_CLOCK_COUNT;
+
+	clks = kzalloc(count * sizeof(*clks), GFP_KERNEL);
+	if (!clks) {
+		pr_err("%s: failed to allocate root clocks\n", __func__);
+		return -ENOMEM;
+	}
+	ccu->data.clks = clks;
+	ccu->data.clk_num = count;
+
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_ROOT_CCU_FRAC_1M, frac_1m);
+
+	return 0;
+}
+
+static int __init bcm281xx_aon_ccu_clks_setup(struct ccu_data *ccu)
+{
+	struct clk **clks;
+	size_t count = BCM281XX_AON_CCU_CLOCK_COUNT;
+
+	clks = kzalloc(count * sizeof(*clks), GFP_KERNEL);
+	if (!clks) {
+		pr_err("%s: failed to allocate aon clocks\n", __func__);
+		return -ENOMEM;
+	}
+	ccu->data.clks = clks;
+	ccu->data.clk_num = count;
+
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_AON_CCU_HUB_TIMER, hub_timer);
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_AON_CCU_PMU_BSC, pmu_bsc);
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_AON_CCU_PMU_BSC_VAR, pmu_bsc_var);
+
+	return 0;
+}
+
+static int __init bcm281xx_hub_ccu_clks_setup(struct ccu_data *ccu)
+{
+	struct clk **clks;
+	size_t count = BCM281XX_HUB_CCU_CLOCK_COUNT;
+
+	clks = kzalloc(count * sizeof(*clks), GFP_KERNEL);
+	if (!clks) {
+		pr_err("%s: failed to allocate hub clocks\n", __func__);
+		return -ENOMEM;
+	}
+	ccu->data.clks = clks;
+	ccu->data.clk_num = count;
+
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_HUB_CCU_TMON_1M, tmon_1m);
+
+	return 0;
+}
+
+static int __init bcm281xx_master_ccu_clks_setup(struct ccu_data *ccu)
+{
+	struct clk **clks;
+	size_t count = BCM281XX_MASTER_CCU_CLOCK_COUNT;
+
+	clks = kzalloc(count * sizeof(*clks), GFP_KERNEL);
+	if (!clks) {
+		pr_err("%s: failed to allocate master clocks\n", __func__);
+		return -ENOMEM;
+	}
+	ccu->data.clks = clks;
+	ccu->data.clk_num = count;
+
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO1, sdio1);
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO2, sdio2);
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO3, sdio3);
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO4, sdio4);
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_USB_IC, usb_ic);
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_HSIC2_48M, hsic2_48m);
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_HSIC2_12M, hsic2_12m);
+
+	return 0;
+}
+
+static int __init bcm281xx_slave_ccu_clks_setup(struct ccu_data *ccu)
+{
+	struct clk **clks;
+	size_t count = BCM281XX_SLAVE_CCU_CLOCK_COUNT;
+
+	clks = kzalloc(count * sizeof(*clks), GFP_KERNEL);
+	if (!clks) {
+		pr_err("%s: failed to allocate slave clocks\n", __func__);
+		return -ENOMEM;
+	}
+	ccu->data.clks = clks;
+	ccu->data.clk_num = count;
+
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB, uartb);
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB2, uartb2);
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB3, uartb3);
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB4, uartb4);
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_SSP0, ssp0);
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_SSP2, ssp2);
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_BSC1, bsc1);
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_BSC2, bsc2);
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_BSC3, bsc3);
+	PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_PWM, pwm);
+
+	return 0;
+}
+
+/* Device tree match table callback functions */
+
+static void __init kona_dt_root_ccu_setup(struct device_node *node)
+{
+	kona_dt_ccu_setup(node, bcm281xx_root_ccu_clks_setup);
+}
+
+static void __init kona_dt_aon_ccu_setup(struct device_node *node)
+{
+	kona_dt_ccu_setup(node, bcm281xx_aon_ccu_clks_setup);
+}
+
+static void __init kona_dt_hub_ccu_setup(struct device_node *node)
+{
+	kona_dt_ccu_setup(node, bcm281xx_hub_ccu_clks_setup);
+}
+
+static void __init kona_dt_master_ccu_setup(struct device_node *node)
+{
+	kona_dt_ccu_setup(node, bcm281xx_master_ccu_clks_setup);
+}
+
+static void __init kona_dt_slave_ccu_setup(struct device_node *node)
+{
+	kona_dt_ccu_setup(node, bcm281xx_slave_ccu_clks_setup);
+}
+
+CLK_OF_DECLARE(bcm11351_root_ccu, BCM11351_DT_ROOT_CCU_COMPAT,
+			kona_dt_root_ccu_setup);
+CLK_OF_DECLARE(bcm11351_aon_ccu, BCM11351_DT_AON_CCU_COMPAT,
+			kona_dt_aon_ccu_setup);
+CLK_OF_DECLARE(bcm11351_hub_ccu, BCM11351_DT_HUB_CCU_COMPAT,
+			kona_dt_hub_ccu_setup);
+CLK_OF_DECLARE(bcm11351_master_ccu, BCM11351_DT_MASTER_CCU_COMPAT,
+			kona_dt_master_ccu_setup);
+CLK_OF_DECLARE(bcm11351_slave_ccu, BCM11351_DT_SLAVE_CCU_COMPAT,
+			kona_dt_slave_ccu_setup);
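Each setup routine allocates the CCU's clock array and then registers one clock per dt-bindings index. PERI_CLK_SETUP() is defined in clk-kona.h, which is not part of this excerpt; judging from how it is invoked together with kona_clk_setup() (see clk-kona-setup.c below), its expansion is presumably close to the following sketch, though that is an assumption:

	/* Presumed shape of PERI_CLK_SETUP(); clk-kona.h is not shown here. */
	#define PERI_CLK_SETUP(clks, ccu, id, name) \
		((clks)[id] = kona_clk_setup((ccu), #name, bcm_clk_peri, \
					     &name##_data))

Under that reading, PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_PWM, pwm) registers a peripheral clock named "pwm" from pwm_data and stores the result at index BCM281XX_SLAVE_CCU_PWM.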
diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c
new file mode 100644
index 000000000000..c7607feb18dd
--- /dev/null
+++ b/drivers/clk/bcm/clk-kona-setup.c
@@ -0,0 +1,769 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ * Copyright 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of_address.h>
+
+#include "clk-kona.h"
+
+/* These are used when a selector or trigger is found to be unneeded */
+#define selector_clear_exists(sel)	((sel)->width = 0)
+#define trigger_clear_exists(trig)	FLAG_CLEAR(trig, TRIG, EXISTS)
+
+LIST_HEAD(ccu_list);	/* The list of set up CCUs */
+
+/* Validity checking */
+
+static bool clk_requires_trigger(struct kona_clk *bcm_clk)
+{
+	struct peri_clk_data *peri = bcm_clk->peri;
+	struct bcm_clk_sel *sel;
+	struct bcm_clk_div *div;
+
+	if (bcm_clk->type != bcm_clk_peri)
+		return false;
+
+	sel = &peri->sel;
+	if (sel->parent_count && selector_exists(sel))
+		return true;
+
+	div = &peri->div;
+	if (!divider_exists(div))
+		return false;
+
+	/* Fixed dividers don't need triggers */
+	if (!divider_is_fixed(div))
+		return true;
+
+	div = &peri->pre_div;
+
+	return divider_exists(div) && !divider_is_fixed(div);
+}
+
+static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
+{
+	struct peri_clk_data *peri;
+	struct bcm_clk_gate *gate;
+	struct bcm_clk_div *div;
+	struct bcm_clk_sel *sel;
+	struct bcm_clk_trig *trig;
+	const char *name;
+	u32 range;
+	u32 limit;
+
+	BUG_ON(bcm_clk->type != bcm_clk_peri);
+	peri = bcm_clk->peri;
+	name = bcm_clk->name;
+	range = bcm_clk->ccu->range;
+
+	limit = range - sizeof(u32);
+	limit = round_down(limit, sizeof(u32));
+
+	gate = &peri->gate;
+	if (gate_exists(gate)) {
+		if (gate->offset > limit) {
+			pr_err("%s: bad gate offset for %s (%u > %u)\n",
+				__func__, name, gate->offset, limit);
+			return false;
+		}
+	}
+
+	div = &peri->div;
+	if (divider_exists(div)) {
+		if (div->offset > limit) {
+			pr_err("%s: bad divider offset for %s (%u > %u)\n",
+				__func__, name, div->offset, limit);
+			return false;
+		}
+	}
+
+	div = &peri->pre_div;
+	if (divider_exists(div)) {
+		if (div->offset > limit) {
+			pr_err("%s: bad pre-divider offset for %s "
+					"(%u > %u)\n",
+					__func__, name, div->offset, limit);
+			return false;
+		}
+	}
+
+	sel = &peri->sel;
+	if (selector_exists(sel)) {
+		if (sel->offset > limit) {
+			pr_err("%s: bad selector offset for %s (%u > %u)\n",
+				__func__, name, sel->offset, limit);
+			return false;
+		}
+	}
+
+	trig = &peri->trig;
+	if (trigger_exists(trig)) {
+		if (trig->offset > limit) {
+			pr_err("%s: bad trigger offset for %s (%u > %u)\n",
+				__func__, name, trig->offset, limit);
+			return false;
+		}
+	}
+
+	trig = &peri->pre_trig;
+	if (trigger_exists(trig)) {
+		if (trig->offset > limit) {
+			pr_err("%s: bad pre-trigger offset for %s (%u > %u)\n",
+				__func__, name, trig->offset, limit);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+/* A bit position must be less than the number of bits in a 32-bit register. */
+static bool bit_posn_valid(u32 bit_posn, const char *field_name,
+			const char *clock_name)
+{
+	u32 limit = BITS_PER_BYTE * sizeof(u32) - 1;
+
+	if (bit_posn > limit) {
+		pr_err("%s: bad %s bit for %s (%u > %u)\n", __func__,
+			field_name, clock_name, bit_posn, limit);
+		return false;
+	}
+	return true;
+}
+
+/*
+ * A bitfield must be at least 1 bit wide. Both the low-order and
+ * high-order bits must lie within a 32-bit register. We require
+ * fields to be less than 32 bits wide, mainly because we use
+ * shifting to produce field masks, and shifting a full word width
+ * is not well-defined by the C standard.
+ */
+static bool bitfield_valid(u32 shift, u32 width, const char *field_name,
+			const char *clock_name)
+{
+	u32 limit = BITS_PER_BYTE * sizeof(u32);
+
+	if (!width) {
+		pr_err("%s: bad %s field width 0 for %s\n", __func__,
+			field_name, clock_name);
+		return false;
+	}
+	if (shift + width > limit) {
+		pr_err("%s: bad %s for %s (%u + %u > %u)\n", __func__,
+			field_name, clock_name, shift, width, limit);
+		return false;
+	}
+	return true;
+}
+
+/*
+ * All gates, if defined, have a status bit, and for hardware-only
+ * gates, that's it. Gates that can be software controlled also
+ * have an enable bit. And a gate that can be hardware or software
+ * controlled will have a hardware/software select bit.
+ */
+static bool gate_valid(struct bcm_clk_gate *gate, const char *field_name,
+			const char *clock_name)
+{
+	if (!bit_posn_valid(gate->status_bit, "gate status", clock_name))
+		return false;
+
+	if (gate_is_sw_controllable(gate)) {
+		if (!bit_posn_valid(gate->en_bit, "gate enable", clock_name))
+			return false;
+
+		if (gate_is_hw_controllable(gate)) {
+			if (!bit_posn_valid(gate->hw_sw_sel_bit,
+						"gate hw/sw select",
+						clock_name))
+				return false;
+		}
+	} else {
+		BUG_ON(!gate_is_hw_controllable(gate));
+	}
+
+	return true;
+}
+
+/*
+ * A selector bitfield must be valid. Its parent_sel array must
+ * also be reasonable for the field.
+ */
+static bool sel_valid(struct bcm_clk_sel *sel, const char *field_name,
+			const char *clock_name)
+{
+	if (!bitfield_valid(sel->shift, sel->width, field_name, clock_name))
+		return false;
+
+	if (sel->parent_count) {
+		u32 max_sel;
+		u32 limit;
+
+		/*
+		 * Make sure the selector field can hold all the
+		 * selector values we expect to be able to use. A
+		 * clock only needs to have a selector defined if it
+		 * has more than one parent. And in that case the
+		 * highest selector value will be in the last entry
+		 * in the array.
+		 */
+		max_sel = sel->parent_sel[sel->parent_count - 1];
+		limit = (1 << sel->width) - 1;
+		if (max_sel > limit) {
+			pr_err("%s: bad selector for %s "
+					"(%u needs > %u bits)\n",
+					__func__, clock_name, max_sel,
+					sel->width);
+			return false;
+		}
+	} else {
+		pr_warn("%s: ignoring selector for %s (no parents)\n",
+			__func__, clock_name);
+		selector_clear_exists(sel);
+		kfree(sel->parent_sel);
+		sel->parent_sel = NULL;
+	}
+
+	return true;
+}
+
+/*
+ * A fixed divider just needs to be non-zero. A variable divider
+ * has to have a valid divider bitfield, and if it has a fraction,
+ * the width of the fraction must be no more than the width of
+ * the divider as a whole.
+ */
+static bool div_valid(struct bcm_clk_div *div, const char *field_name,
+			const char *clock_name)
+{
+	if (divider_is_fixed(div)) {
+		/* Any fixed divider value but 0 is OK */
+		if (div->fixed == 0) {
+			pr_err("%s: bad %s fixed value 0 for %s\n", __func__,
+				field_name, clock_name);
+			return false;
+		}
+		return true;
+	}
+	if (!bitfield_valid(div->shift, div->width, field_name, clock_name))
+		return false;
+
+	if (divider_has_fraction(div))
+		if (div->frac_width > div->width) {
+			pr_warn("%s: bad %s fraction width for %s (%u > %u)\n",
+				__func__, field_name, clock_name,
+				div->frac_width, div->width);
+			return false;
+		}
+
+	return true;
+}
+
+/*
+ * If a clock has two dividers, the combined number of fractional
+ * bits must be representable in a 32-bit unsigned value. This
+ * is because we scale up a dividend using both dividers before
+ * dividing to improve accuracy, and we need to avoid overflow.
+ */
+static bool kona_dividers_valid(struct kona_clk *bcm_clk)
+{
+	struct peri_clk_data *peri = bcm_clk->peri;
+	struct bcm_clk_div *div;
+	struct bcm_clk_div *pre_div;
+	u32 limit;
+
+	BUG_ON(bcm_clk->type != bcm_clk_peri);
+
+	if (!divider_exists(&peri->div) || !divider_exists(&peri->pre_div))
+		return true;
+
+	div = &peri->div;
+	pre_div = &peri->pre_div;
+	if (divider_is_fixed(div) || divider_is_fixed(pre_div))
+		return true;
+
+	limit = BITS_PER_BYTE * sizeof(u32);
+
+	return div->frac_width + pre_div->frac_width <= limit;
+}
+
+
+/* A trigger just needs to represent a valid bit position */
+static bool trig_valid(struct bcm_clk_trig *trig, const char *field_name,
+			const char *clock_name)
+{
+	return bit_posn_valid(trig->bit, field_name, clock_name);
+}
+
+/* Determine whether the set of peripheral clock registers are valid. */
+static bool
+peri_clk_data_valid(struct kona_clk *bcm_clk)
+{
+	struct peri_clk_data *peri;
+	struct bcm_clk_gate *gate;
+	struct bcm_clk_sel *sel;
+	struct bcm_clk_div *div;
+	struct bcm_clk_div *pre_div;
+	struct bcm_clk_trig *trig;
+	const char *name;
+
+	BUG_ON(bcm_clk->type != bcm_clk_peri);
+
+	/*
+	 * First validate register offsets. This is the only place
+	 * where we need something from the ccu, so we do these
+	 * together.
+	 */
+	if (!peri_clk_data_offsets_valid(bcm_clk))
+		return false;
+
+	peri = bcm_clk->peri;
+	name = bcm_clk->name;
+	gate = &peri->gate;
+	if (gate_exists(gate) && !gate_valid(gate, "gate", name))
+		return false;
+
+	sel = &peri->sel;
+	if (selector_exists(sel)) {
+		if (!sel_valid(sel, "selector", name))
+			return false;
+
+	} else if (sel->parent_count > 1) {
+		pr_err("%s: multiple parents but no selector for %s\n",
+			__func__, name);
+
+		return false;
+	}
+
+	div = &peri->div;
+	pre_div = &peri->pre_div;
+	if (divider_exists(div)) {
+		if (!div_valid(div, "divider", name))
+			return false;
+
+		if (divider_exists(pre_div))
+			if (!div_valid(pre_div, "pre-divider", name))
+				return false;
+	} else if (divider_exists(pre_div)) {
+		pr_err("%s: pre-divider but no divider for %s\n", __func__,
+			name);
+		return false;
+	}
+
+	trig = &peri->trig;
+	if (trigger_exists(trig)) {
+		if (!trig_valid(trig, "trigger", name))
+			return false;
+
+		if (trigger_exists(&peri->pre_trig)) {
+			if (!trig_valid(&peri->pre_trig, "pre-trigger", name)) {
+				return false;
+			}
+		}
+		if (!clk_requires_trigger(bcm_clk)) {
+			pr_warn("%s: ignoring trigger for %s (not needed)\n",
+				__func__, name);
+			trigger_clear_exists(trig);
+		}
+	} else if (trigger_exists(&peri->pre_trig)) {
+		pr_err("%s: pre-trigger but no trigger for %s\n", __func__,
+			name);
+		return false;
+	} else if (clk_requires_trigger(bcm_clk)) {
+		pr_err("%s: required trigger missing for %s\n", __func__,
+			name);
+		return false;
+	}
+
+	return kona_dividers_valid(bcm_clk);
+}
+
+static bool kona_clk_valid(struct kona_clk *bcm_clk)
+{
+	switch (bcm_clk->type) {
+	case bcm_clk_peri:
+		if (!peri_clk_data_valid(bcm_clk))
+			return false;
+		break;
+	default:
+		pr_err("%s: unrecognized clock type (%d)\n", __func__,
+			(int)bcm_clk->type);
+		return false;
+	}
+	return true;
+}
+
+/*
+ * Scan an array of parent clock names to determine whether there
+ * are any entries containing BAD_CLK_NAME. Such entries are
+ * placeholders for non-supported clocks. Keep track of the
+ * position of each clock name in the original array.
+ *
+ * Allocates an array of pointers to hold the names of all
+ * non-null entries in the original array, and returns a pointer to
+ * that array in *names. This will be used for registering the
+ * clock with the common clock code. On successful return,
+ * *count indicates how many entries are in that names array.
+ *
+ * If there is more than one entry in the resulting names array,
+ * another array is allocated to record the parent selector value
+ * for each (defined) parent clock. This is the value that
+ * represents this parent clock in the clock's source selector
+ * register. The position of the clock in the original parent array
+ * defines that selector value. The number of entries in this array
+ * is the same as the number of entries in the parent names array.
+ *
+ * The array of selector values is returned. If the clock has no
+ * parents, no selector is required and a null pointer is returned.
+ *
+ * Returns a null pointer if the clock names array supplied was
+ * null. (This is not an error.)
+ *
+ * Returns a pointer-coded error if an error occurs.
+ */
+static u32 *parent_process(const char *clocks[],
+			u32 *count, const char ***names)
+{
+	const char **parent_names;
+	u32 *parent_sel;
+	const char **clock;
+	u32 parent_count;
+	u32 bad_count = 0;
+	u32 orig_count;
+	u32 i;
+	u32 j;
+
+	*count = 0;	/* In case of early return */
+	*names = NULL;
+	if (!clocks)
+		return NULL;
+
+	/*
+	 * Count the number of names in the null-terminated array,
+	 * and find out how many of those are actually clock names.
+	 */
+	for (clock = clocks; *clock; clock++)
+		if (*clock == BAD_CLK_NAME)
+			bad_count++;
+	orig_count = (u32)(clock - clocks);
+	parent_count = orig_count - bad_count;
+
+	/* If all clocks are unsupported, we treat it as no clock */
+	if (!parent_count)
+		return NULL;
+
+	/* Avoid exceeding our parent clock limit */
+	if (parent_count > PARENT_COUNT_MAX) {
+		pr_err("%s: too many parents (%u > %u)\n", __func__,
+			parent_count, PARENT_COUNT_MAX);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/*
+	 * There is one parent name for each defined parent clock.
+	 * We also maintain an array containing the selector value
+	 * for each defined clock. If there's only one clock, the
+	 * selector is not required, but we allocate space for the
+	 * array anyway to keep things simple.
+	 */
+	parent_names = kmalloc(parent_count * sizeof(*parent_names), GFP_KERNEL);
+	if (!parent_names) {
+		pr_err("%s: error allocating %u parent names\n", __func__,
+				parent_count);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* There is at least one parent, so allocate a selector array */
+
+	parent_sel = kmalloc(parent_count * sizeof(*parent_sel), GFP_KERNEL);
+	if (!parent_sel) {
+		pr_err("%s: error allocating %u parent selectors\n", __func__,
+				parent_count);
+		kfree(parent_names);
+
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Now fill in the parent names and selector arrays */
+	for (i = 0, j = 0; i < orig_count; i++) {
+		if (clocks[i] != BAD_CLK_NAME) {
+			parent_names[j] = clocks[i];
+			parent_sel[j] = i;
+			j++;
+		}
+	}
+	*names = parent_names;
+	*count = parent_count;
+
+	return parent_sel;
+}
+
+static int
+clk_sel_setup(const char **clocks, struct bcm_clk_sel *sel,
+		struct clk_init_data *init_data)
+{
+	const char **parent_names = NULL;
+	u32 parent_count = 0;
+	u32 *parent_sel;
+
+	/*
+	 * If a peripheral clock has multiple parents, the value
+	 * used by the hardware to select that parent is represented
+	 * by the parent clock's position in the "clocks" list. Some
+	 * values don't have defined or supported clocks; these will
+	 * have BAD_CLK_NAME entries in the parents[] array. The
+	 * list is terminated by a NULL entry.
+	 *
+	 * We need to supply (only) the names of defined parent
+	 * clocks when registering a clock though, so we use an
+	 * array of parent selector values to map between the
+	 * indexes the common clock code uses and the selector
+	 * values we need.
+	 */
+	parent_sel = parent_process(clocks, &parent_count, &parent_names);
+	if (IS_ERR(parent_sel)) {
+		int ret = PTR_ERR(parent_sel);
+
+		pr_err("%s: error processing parent clocks for %s (%d)\n",
+			__func__, init_data->name, ret);
+
+		return ret;
+	}
+
+	init_data->parent_names = parent_names;
+	init_data->num_parents = parent_count;
+
+	sel->parent_count = parent_count;
+	sel->parent_sel = parent_sel;
+
+	return 0;
+}
+
+static void clk_sel_teardown(struct bcm_clk_sel *sel,
+		struct clk_init_data *init_data)
+{
+	kfree(sel->parent_sel);
+	sel->parent_sel = NULL;
+	sel->parent_count = 0;
+
+	init_data->num_parents = 0;
+	kfree(init_data->parent_names);
+	init_data->parent_names = NULL;
+}
+
+static void peri_clk_teardown(struct peri_clk_data *data,
+				struct clk_init_data *init_data)
+{
+	clk_sel_teardown(&data->sel, init_data);
+	init_data->ops = NULL;
+}
+
+/*
+ * Caller is responsible for freeing the parent_names[] and
+ * parent_sel[] arrays in the peripheral clock's "data" structure
+ * that can be assigned if the clock has one or more parent clocks
+ * associated with it.
+ */
+static int peri_clk_setup(struct ccu_data *ccu, struct peri_clk_data *data,
+			struct clk_init_data *init_data)
+{
+	init_data->ops = &kona_peri_clk_ops;
+	init_data->flags = CLK_IGNORE_UNUSED;
+
+	return clk_sel_setup(data->clocks, &data->sel, init_data);
+}
+
+static void bcm_clk_teardown(struct kona_clk *bcm_clk)
+{
+	switch (bcm_clk->type) {
+	case bcm_clk_peri:
+		peri_clk_teardown(bcm_clk->data, &bcm_clk->init_data);
+		break;
+	default:
+		break;
+	}
+	bcm_clk->data = NULL;
+	bcm_clk->type = bcm_clk_none;
+}
+
+static void kona_clk_teardown(struct clk *clk)
+{
+	struct clk_hw *hw;
+	struct kona_clk *bcm_clk;
+
+	if (!clk)
+		return;
+
+	hw = __clk_get_hw(clk);
+	if (!hw) {
+		pr_err("%s: clk %p has null hw pointer\n", __func__, clk);
+		return;
+	}
+	clk_unregister(clk);
+
+	bcm_clk = to_kona_clk(hw);
+	bcm_clk_teardown(bcm_clk);
+}
+
+struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name,
+			enum bcm_clk_type type, void *data)
+{
+	struct kona_clk *bcm_clk;
+	struct clk_init_data *init_data;
+	struct clk *clk = NULL;
+
+	bcm_clk = kzalloc(sizeof(*bcm_clk), GFP_KERNEL);
+	if (!bcm_clk) {
+		pr_err("%s: failed to allocate bcm_clk for %s\n", __func__,
+			name);
+		return NULL;
+	}
+	bcm_clk->ccu = ccu;
+	bcm_clk->name = name;
+
+	init_data = &bcm_clk->init_data;
+	init_data->name = name;
+	switch (type) {
+	case bcm_clk_peri:
+		if (peri_clk_setup(ccu, data, init_data))
+			goto out_free;
+		break;
+	default:
+		data = NULL;
+		break;
+	}
+	bcm_clk->type = type;
+	bcm_clk->data = data;
+
+	/* Make sure everything makes sense before we set it up */
+	if (!kona_clk_valid(bcm_clk)) {
+		pr_err("%s: clock data invalid for %s\n", __func__, name);
+		goto out_teardown;
+	}
+
+	bcm_clk->hw.init = init_data;
+	clk = clk_register(NULL, &bcm_clk->hw);
+	if (IS_ERR(clk)) {
+		pr_err("%s: error registering clock %s (%ld)\n", __func__,
+			name, PTR_ERR(clk));
+		goto out_teardown;
+	}
+	BUG_ON(!clk);
+
+	return clk;
+out_teardown:
+	bcm_clk_teardown(bcm_clk);
+out_free:
+	kfree(bcm_clk);
+
+	return NULL;
+}
+
+static void ccu_clks_teardown(struct ccu_data *ccu)
+{
+	u32 i;
+
+	for (i = 0; i < ccu->data.clk_num; i++)
+		kona_clk_teardown(ccu->data.clks[i]);
+	kfree(ccu->data.clks);
+}
+
+static void kona_ccu_teardown(struct ccu_data *ccu)
+{
+	if (!ccu)
+		return;
+
+	if (!ccu->base)
+		goto done;
+
+	of_clk_del_provider(ccu->node);	/* safe if never added */
+	ccu_clks_teardown(ccu);
+	list_del(&ccu->links);
+	of_node_put(ccu->node);
+	iounmap(ccu->base);
+done:
+	kfree(ccu->name);
+	kfree(ccu);
+}
+
+/*
+ * Set up a CCU. Call the provided ccu_clks_setup callback to
+ * initialize the array of clocks provided by the CCU.
+ */
+void __init kona_dt_ccu_setup(struct device_node *node,
+			int (*ccu_clks_setup)(struct ccu_data *))
+{
+	struct ccu_data *ccu;
+	struct resource res = { 0 };
+	resource_size_t range;
+	int ret;
+
+	ccu = kzalloc(sizeof(*ccu), GFP_KERNEL);
+	if (ccu)
+		ccu->name = kstrdup(node->name, GFP_KERNEL);
+	if (!ccu || !ccu->name) {
+		pr_err("%s: unable to allocate CCU struct for %s\n",
+			__func__, node->name);
+		kfree(ccu);
+
+		return;
+	}
+
+	ret = of_address_to_resource(node, 0, &res);
+	if (ret) {
+		pr_err("%s: no valid CCU registers found for %s\n", __func__,
+			node->name);
+		goto out_err;
+	}
+
+	range = resource_size(&res);
+	if (range > (resource_size_t)U32_MAX) {
+		pr_err("%s: address range too large for %s\n", __func__,
+			node->name);
+		goto out_err;
+	}
+
+	ccu->range = (u32)range;
+	ccu->base = ioremap(res.start, ccu->range);
+	if (!ccu->base) {
+		pr_err("%s: unable to map CCU registers for %s\n", __func__,
+			node->name);
+		goto out_err;
+	}
+
+	spin_lock_init(&ccu->lock);
+	INIT_LIST_HEAD(&ccu->links);
+	ccu->node = of_node_get(node);
+
+	list_add_tail(&ccu->links, &ccu_list);
+
+	/* Set up clocks array (in ccu->data) */
+	if (ccu_clks_setup(ccu))
+		goto out_err;
+
+	ret = of_clk_add_provider(node, of_clk_src_onecell_get, &ccu->data);
+	if (ret) {
+		pr_err("%s: error adding ccu %s as provider (%d)\n", __func__,
+				node->name, ret);
+		goto out_err;
+	}
+
+	if (!kona_ccu_init(ccu))
+		pr_err("Broadcom %s initialization had errors\n", node->name);
+
+	return;
+out_err:
+	kona_ccu_teardown(ccu);
+	pr_err("Broadcom %s setup aborted\n", node->name);
+}
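The subtle piece of this file is the mapping parent_process() builds: the common clock framework is given only the compacted list of real parent names, while parent_sel[] records each surviving name's position in the original array, which is exactly the value the hardware selector field expects. A worked example with hypothetical inputs:

	/*
	 * clocks[]       = { "ref_crystal", BAD_CLK_NAME, "var_96m", NULL }
	 *
	 * parent_names[] = { "ref_crystal", "var_96m" }	(*count == 2)
	 * parent_sel[]   = { 0, 2 }
	 *
	 * So when the framework selects parent index 1 ("var_96m"),
	 * the driver writes selector value 2 to the hardware.
	 */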
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
new file mode 100644
index 000000000000..e3d339e08309
--- /dev/null
+++ b/drivers/clk/bcm/clk-kona.c
@@ -0,0 +1,1033 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ * Copyright 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "clk-kona.h"
+
+#include <linux/delay.h>
+
+#define CCU_ACCESS_PASSWORD	0xA5A500
+#define CLK_GATE_DELAY_LOOP	2000
+
+/* Bitfield operations */
+
+/* Produces a mask of set bits covering a range of a 32-bit value */
+static inline u32 bitfield_mask(u32 shift, u32 width)
+{
+	return ((1 << width) - 1) << shift;
+}
+
+/* Extract the value of a bitfield found within a given register value */
+static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width)
+{
+	return (reg_val & bitfield_mask(shift, width)) >> shift;
+}
+
+/* Replace the value of a bitfield found within a given register value */
+static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val)
+{
+	u32 mask = bitfield_mask(shift, width);
+
+	return (reg_val & ~mask) | (val << shift);
+}
+
+/* Divider and scaling helpers */
+
+/*
+ * Implement DIV_ROUND_CLOSEST() for 64-bit dividend and both values
+ * unsigned. Note that unlike do_div(), the remainder is discarded
+ * and the return value is the quotient (not the remainder).
+ */
+u64 do_div_round_closest(u64 dividend, unsigned long divisor)
+{
+	u64 result;
+
+	result = dividend + ((u64)divisor >> 1);
+	(void)do_div(result, divisor);
+
+	return result;
+}
+
+/* Convert a divider into the scaled divisor value it represents. */
+static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
+{
+	return (u64)reg_div + ((u64)1 << div->frac_width);
+}
+
+/*
+ * Build a scaled divider value as close as possible to the
+ * given whole part (div_value) and fractional part (expressed
+ * in billionths).
+ */
+u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
+{
+	u64 combined;
+
+	BUG_ON(!div_value);
+	BUG_ON(billionths >= BILLION);
+
+	combined = (u64)div_value * BILLION + billionths;
+	combined <<= div->frac_width;
+
+	return do_div_round_closest(combined, BILLION);
+}
+
+/* The scaled minimum divisor representable by a divider */
+static inline u64
+scaled_div_min(struct bcm_clk_div *div)
+{
+	if (divider_is_fixed(div))
+		return (u64)div->fixed;
+
+	return scaled_div_value(div, 0);
+}
+
+/* The scaled maximum divisor representable by a divider */
+u64 scaled_div_max(struct bcm_clk_div *div)
+{
+	u32 reg_div;
+
+	if (divider_is_fixed(div))
+		return (u64)div->fixed;
+
+	reg_div = ((u32)1 << div->width) - 1;
+
+	return scaled_div_value(div, reg_div);
+}
+
+/*
+ * Convert a scaled divisor into its divider representation as
+ * stored in a divider register field.
+ */
+static inline u32
+divider(struct bcm_clk_div *div, u64 scaled_div)
+{
+	BUG_ON(scaled_div < scaled_div_min(div));
+	BUG_ON(scaled_div > scaled_div_max(div));
+
+	return (u32)(scaled_div - ((u64)1 << div->frac_width));
+}
+
+/* Return a rate scaled for use when dividing by a scaled divisor. */
+static inline u64
+scale_rate(struct bcm_clk_div *div, u32 rate)
+{
+	if (divider_is_fixed(div))
+		return (u64)rate;
+
+	return (u64)rate << div->frac_width;
+}
+
+/* CCU access */
+
+/* Read a 32-bit register value from a CCU's address space. */
+static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset)
+{
+	return readl(ccu->base + reg_offset);
+}
+
+/* Write a 32-bit register value into a CCU's address space. */
+static inline void
+__ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val)
+{
+	writel(reg_val, ccu->base + reg_offset);
+}
+
+static inline unsigned long ccu_lock(struct ccu_data *ccu)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ccu->lock, flags);
+
+	return flags;
+}
+static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags)
+{
+	spin_unlock_irqrestore(&ccu->lock, flags);
+}
+
+/*
+ * Enable/disable write access to CCU protected registers. The
+ * WR_ACCESS register for all CCUs is at offset 0.
+ */
+static inline void __ccu_write_enable(struct ccu_data *ccu)
+{
+	if (ccu->write_enabled) {
+		pr_err("%s: access already enabled for %s\n", __func__,
+			ccu->name);
+		return;
+	}
+	ccu->write_enabled = true;
+	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1);
+}
+
+static inline void __ccu_write_disable(struct ccu_data *ccu)
+{
+	if (!ccu->write_enabled) {
+		pr_err("%s: access wasn't enabled for %s\n", __func__,
+			ccu->name);
+		return;
+	}
+
+	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD);
+	ccu->write_enabled = false;
+}
+
+/*
+ * Poll a register in a CCU's address space, returning when the
+ * specified bit in that register's value is set (or clear). Delay
+ * a microsecond after each read of the register. Returns true if
+ * successful, or false if we gave up trying.
+ *
+ * Caller must ensure the CCU lock is held.
+ */
+static inline bool
+__ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want)
+{
+	unsigned int tries;
+	u32 bit_mask = 1 << bit;
+
+	for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) {
+		u32 val;
+		bool bit_val;
+
+		val = __ccu_read(ccu, reg_offset);
+		bit_val = (val & bit_mask) != 0;
+		if (bit_val == want)
+			return true;
+		udelay(1);
+	}
+	return false;
+}
+
+/* Gate operations */
+
+/* Determine whether a clock is gated. CCU lock must be held. */
+static bool
+__is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
+{
+	u32 bit_mask;
+	u32 reg_val;
+
+	/* If there is no gate we can assume it's enabled. */
+	if (!gate_exists(gate))
+		return true;
+
+	bit_mask = 1 << gate->status_bit;
+	reg_val = __ccu_read(ccu, gate->offset);
+
+	return (reg_val & bit_mask) != 0;
+}
+
+/* Determine whether a clock is gated. */
+static bool
+is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
+{
+	unsigned long flags;
237 bool ret;
238
239 /* Avoid taking the lock if we can */
240 if (!gate_exists(gate))
241 return true;
242
243 flags = ccu_lock(ccu);
244 ret = __is_clk_gate_enabled(ccu, gate);
245 ccu_unlock(ccu, flags);
246
247 return ret;
248}
249
250/*
251 * Commit our desired gate state to the hardware.
252 * Returns true if successful, false otherwise.
253 */
254static bool
255__gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate)
256{
257 u32 reg_val;
258 u32 mask;
259 bool enabled = false;
260
261 BUG_ON(!gate_exists(gate));
262 if (!gate_is_sw_controllable(gate))
263 return true; /* Nothing we can change */
264
265 reg_val = __ccu_read(ccu, gate->offset);
266
267 /* For a hardware/software gate, set which is in control */
268 if (gate_is_hw_controllable(gate)) {
269 mask = (u32)1 << gate->hw_sw_sel_bit;
270 if (gate_is_sw_managed(gate))
271 reg_val |= mask;
272 else
273 reg_val &= ~mask;
274 }
275
276 /*
277 * If software is in control, enable or disable the gate.
278 * If hardware is, clear the enabled bit for good measure.
279 * If a software controlled gate can't be disabled, we're
280 * required to write a 0 into the enable bit (but the gate
281 * will be enabled).
282 */
283 mask = (u32)1 << gate->en_bit;
284 if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) &&
285 !gate_is_no_disable(gate))
286 reg_val |= mask;
287 else
288 reg_val &= ~mask;
289
290 __ccu_write(ccu, gate->offset, reg_val);
291
292 /* For a hardware controlled gate, we're done */
293 if (!gate_is_sw_managed(gate))
294 return true;
295
296 /* Otherwise wait for the gate to be in desired state */
297 return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled);
298}
299
300/*
301 * Initialize a gate. Our desired state (hardware/software select,
302 * and if software, its enable state) is committed to hardware
303 * without the usual checks to see if it's already set up that way.
304 * Returns true if successful, false otherwise.
305 */
306static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate)
307{
308 if (!gate_exists(gate))
309 return true;
310 return __gate_commit(ccu, gate);
311}
312
313/*
314 * Set a gate to enabled or disabled state. Does nothing if the
315 * gate is not currently under software control, or if it is already
316 * in the requested state. Returns true if successful, false
317 * otherwise. CCU lock must be held.
318 */
319static bool
320__clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable)
321{
322 bool ret;
323
324 if (!gate_exists(gate) || !gate_is_sw_managed(gate))
325 return true; /* Nothing to do */
326
327 if (!enable && gate_is_no_disable(gate)) {
328 pr_warn("%s: invalid gate disable request (ignoring)\n",
329 __func__);
330 return true;
331 }
332
333 if (enable == gate_is_enabled(gate))
334 return true; /* No change */
335
336 gate_flip_enabled(gate);
337 ret = __gate_commit(ccu, gate);
338 if (!ret)
339 gate_flip_enabled(gate); /* Revert the change */
340
341 return ret;
342}
343
344/* Enable or disable a gate. Returns 0 if successful, -EIO otherwise */
345static int clk_gate(struct ccu_data *ccu, const char *name,
346 struct bcm_clk_gate *gate, bool enable)
347{
348 unsigned long flags;
349 bool success;
350
351 /*
352 * Avoid taking the lock if we can. We quietly ignore
353 * requests to change state that don't make sense.
354 */
355 if (!gate_exists(gate) || !gate_is_sw_managed(gate))
356 return 0;
357 if (!enable && gate_is_no_disable(gate))
358 return 0;
359
360 flags = ccu_lock(ccu);
361 __ccu_write_enable(ccu);
362
363 success = __clk_gate(ccu, gate, enable);
364
365 __ccu_write_disable(ccu);
366 ccu_unlock(ccu, flags);
367
368 if (success)
369 return 0;
370
371 pr_err("%s: failed to %s gate for %s\n", __func__,
372 enable ? "enable" : "disable", name);
373
374 return -EIO;
375}
376
377/* Trigger operations */
378
379/*
380 * Caller must ensure CCU lock is held and access is enabled.
381 * Returns true if successful, false otherwise.
382 */
383static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig)
384{
385 /* Trigger the clock and wait for it to finish */
386 __ccu_write(ccu, trig->offset, 1 << trig->bit);
387
388 return __ccu_wait_bit(ccu, trig->offset, trig->bit, false);
389}
390
391/* Divider operations */
392
393/* Read a divider value and return the scaled divisor it represents. */
394static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
395{
396 unsigned long flags;
397 u32 reg_val;
398 u32 reg_div;
399
400 if (divider_is_fixed(div))
401 return (u64)div->fixed;
402
403 flags = ccu_lock(ccu);
404 reg_val = __ccu_read(ccu, div->offset);
405 ccu_unlock(ccu, flags);
406
407 /* Extract the full divider field from the register value */
408 reg_div = bitfield_extract(reg_val, div->shift, div->width);
409
410 /* Return the scaled divisor value it represents */
411 return scaled_div_value(div, reg_div);
412}
413
414/*
415 * Convert a divider's scaled divisor value into its recorded form
416 * and commit it into the hardware divider register.
417 *
418 * Returns 0 on success. Returns -EINVAL for invalid arguments.
419 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
420 */
421static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
422 struct bcm_clk_div *div, struct bcm_clk_trig *trig)
423{
424 bool enabled;
425 u32 reg_div;
426 u32 reg_val;
427 int ret = 0;
428
429 BUG_ON(divider_is_fixed(div));
430
431 /*
432 * If we're just initializing the divider, and no initial
433 * state was defined in the device tree, we just find out
434 * what its current value is rather than updating it.
435 */
436 if (div->scaled_div == BAD_SCALED_DIV_VALUE) {
437 reg_val = __ccu_read(ccu, div->offset);
438 reg_div = bitfield_extract(reg_val, div->shift, div->width);
439 div->scaled_div = scaled_div_value(div, reg_div);
440
441 return 0;
442 }
443
444 /* Convert the scaled divisor to the value we need to record */
445 reg_div = divider(div, div->scaled_div);
446
447 /* Clock needs to be enabled before changing the rate */
448 enabled = __is_clk_gate_enabled(ccu, gate);
449 if (!enabled && !__clk_gate(ccu, gate, true)) {
450 ret = -ENXIO;
451 goto out;
452 }
453
454 /* Replace the divider value and record the result */
455 reg_val = __ccu_read(ccu, div->offset);
456 reg_val = bitfield_replace(reg_val, div->shift, div->width, reg_div);
457 __ccu_write(ccu, div->offset, reg_val);
458
459 /* If the trigger fails we still want to disable the gate */
460 if (!__clk_trigger(ccu, trig))
461 ret = -EIO;
462
463 /* Disable the clock again if it was disabled to begin with */
464 if (!enabled && !__clk_gate(ccu, gate, false))
465 ret = ret ? ret : -ENXIO; /* return first error */
466out:
467 return ret;
468}
469
470/*
471 * Initialize a divider by committing our desired state to hardware
472 * without the usual checks to see if it's already set up that way.
473 * Returns true if successful, false otherwise.
474 */
475static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
476 struct bcm_clk_div *div, struct bcm_clk_trig *trig)
477{
478 if (!divider_exists(div) || divider_is_fixed(div))
479 return true;
480 return !__div_commit(ccu, gate, div, trig);
481}
482
483static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
484 struct bcm_clk_div *div, struct bcm_clk_trig *trig,
485 u64 scaled_div)
486{
487 unsigned long flags;
488 u64 previous;
489 int ret;
490
491 BUG_ON(divider_is_fixed(div));
492
493 previous = div->scaled_div;
494 if (previous == scaled_div)
495 return 0; /* No change */
496
497 div->scaled_div = scaled_div;
498
499 flags = ccu_lock(ccu);
500 __ccu_write_enable(ccu);
501
502 ret = __div_commit(ccu, gate, div, trig);
503
504 __ccu_write_disable(ccu);
505 ccu_unlock(ccu, flags);
506
507 if (ret)
508 div->scaled_div = previous; /* Revert the change */
509
510 return ret;
511
512}
513
514/* Common clock rate helpers */
515
516/*
517 * Implement the common clock framework recalc_rate method, taking
518 * into account a divider and an optional pre-divider. The
519 * pre-divider register pointer may be NULL.
520 */
521static unsigned long clk_recalc_rate(struct ccu_data *ccu,
522 struct bcm_clk_div *div, struct bcm_clk_div *pre_div,
523 unsigned long parent_rate)
524{
525 u64 scaled_parent_rate;
526 u64 scaled_div;
527 u64 result;
528
529 if (!divider_exists(div))
530 return parent_rate;
531
532 if (parent_rate > (unsigned long)LONG_MAX)
533 return 0; /* actually this would be a caller bug */
534
535 /*
536 * If there is a pre-divider, divide the scaled parent rate
537 * by the pre-divider value first. In this case--to improve
538 * accuracy--scale the parent rate by *both* the pre-divider
539 * value and the divider before actually computing the
540 * result of the pre-divider.
541 *
542 * If there's only one divider, just scale the parent rate.
543 */
544 if (pre_div && divider_exists(pre_div)) {
545 u64 scaled_rate;
546
547 scaled_rate = scale_rate(pre_div, parent_rate);
548 scaled_rate = scale_rate(div, scaled_rate);
549 scaled_div = divider_read_scaled(ccu, pre_div);
550 scaled_parent_rate = do_div_round_closest(scaled_rate,
551 scaled_div);
552 } else {
553 scaled_parent_rate = scale_rate(div, parent_rate);
554 }
555
556 /*
557 * Get the scaled divisor value, and divide the scaled
558 * parent rate by that to determine this clock's resulting
559 * rate.
560 */
561 scaled_div = divider_read_scaled(ccu, div);
562 result = do_div_round_closest(scaled_parent_rate, scaled_div);
563
564 return (unsigned long)result;
565}
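To make the double-scaling above concrete, here is a worked example (all values are invented for illustration, not taken from any real CCU):

	/*
	 * parent_rate = 312000000 (312 MHz)
	 * pre-divider: frac_width = 1, dividing by 2   -> scaled divisor 4
	 * divider:     frac_width = 3, dividing by 4.5 -> scaled divisor 36
	 *
	 *   scaled_rate        = 312000000 << 1 << 3       = 4992000000
	 *   scaled_parent_rate = 4992000000 / 4  (rounded) = 1248000000
	 *   result             = 1248000000 / 36 (rounded) = 34666667
	 *
	 * i.e. 312 MHz / (2 * 4.5) ~= 34.67 MHz, computed entirely in
	 * integers, with no fractional intermediate rate.
	 */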
566
567/*
568 * Compute the output rate produced when a given parent rate is fed
569 * into two dividers. The pre-divider can be NULL, and even if it's
570 * non-null it may be nonexistent. It's also OK for the divider to
571 * be nonexistent, and in that case the pre-divider is also ignored.
572 *
573 * If scaled_div is non-null, it is used to return the scaled divisor
574 * value used by the (downstream) divider to produce that rate.
575 */
576static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
577 struct bcm_clk_div *pre_div,
578 unsigned long rate, unsigned long parent_rate,
579 u64 *scaled_div)
580{
581 u64 scaled_parent_rate;
582 u64 min_scaled_div;
583 u64 max_scaled_div;
584 u64 best_scaled_div;
585 u64 result;
586
587 BUG_ON(!divider_exists(div));
588 BUG_ON(!rate);
589 BUG_ON(parent_rate > (u64)LONG_MAX);
590
591 /*
592 * If there is a pre-divider, divide the scaled parent rate
593 * by the pre-divider value first. In this case--to improve
594 * accuracy--scale the parent rate by *both* the pre-divider
595 * value and the divider before actually computing the
596 * result of the pre-divider.
597 *
598 * If there's only one divider, just scale the parent rate.
599 *
600 * For simplicity we treat the pre-divider as fixed (for now).
601 */
602 if (divider_exists(pre_div)) {
603 u64 scaled_rate;
604 u64 scaled_pre_div;
605
606 scaled_rate = scale_rate(pre_div, parent_rate);
607 scaled_rate = scale_rate(div, scaled_rate);
608 scaled_pre_div = divider_read_scaled(ccu, pre_div);
609 scaled_parent_rate = do_div_round_closest(scaled_rate,
610 scaled_pre_div);
611 } else {
612 scaled_parent_rate = scale_rate(div, parent_rate);
613 }
614
615 /*
616 * Compute the best possible divider and ensure it is in
617 * range. A fixed divider can't be changed, so just report
618 * the best we can do.
619 */
620 if (!divider_is_fixed(div)) {
621 best_scaled_div = do_div_round_closest(scaled_parent_rate,
622 rate);
623 min_scaled_div = scaled_div_min(div);
624 max_scaled_div = scaled_div_max(div);
625 if (best_scaled_div > max_scaled_div)
626 best_scaled_div = max_scaled_div;
627 else if (best_scaled_div < min_scaled_div)
628 best_scaled_div = min_scaled_div;
629 } else {
630 best_scaled_div = divider_read_scaled(ccu, div);
631 }
632
633 /* OK, figure out the resulting rate */
634 result = do_div_round_closest(scaled_parent_rate, best_scaled_div);
635
636 if (scaled_div)
637 *scaled_div = best_scaled_div;
638
639 return (long)result;
640}
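Continuing that invented example for the rounding path: a request for 10 MHz with scaled_parent_rate = 1248000000 gives an ideal scaled divisor of round(1248000000 / 10000000) = 125. Assuming the usual min/max helpers, a 5-bit divider field with frac_width = 3 can represent at most (2^5 - 1) + 2^3 = 39, so the divisor is clamped to 39 and the reported rate is 1248000000 / 39 = 32 MHz, the closest this divider can get.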
641
642/* Common clock parent helpers */
643
644/*
645 * For a given parent selector (register field) value, find the
646 * index into a selector's parent_sel array that contains it.
647 * Returns the index, or BAD_CLK_INDEX if it's not found.
648 */
649static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel)
650{
651 u8 i;
652
653 BUG_ON(sel->parent_count > (u32)U8_MAX);
654 for (i = 0; i < sel->parent_count; i++)
655 if (sel->parent_sel[i] == parent_sel)
656 return i;
657 return BAD_CLK_INDEX;
658}
659
660/*
661 * Fetch the current value of the selector, and translate that into
662 * its corresponding index in the parent array we registered with
663 * the clock framework.
664 *
665 * Returns parent array index that corresponds with the value found,
666 * or BAD_CLK_INDEX if the found value is out of range.
667 */
668static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel)
669{
670 unsigned long flags;
671 u32 reg_val;
672 u32 parent_sel;
673 u8 index;
674
675 /* If there's no selector, there's only one parent */
676 if (!selector_exists(sel))
677 return 0;
678
679 /* Get the value in the selector register */
680 flags = ccu_lock(ccu);
681 reg_val = __ccu_read(ccu, sel->offset);
682 ccu_unlock(ccu, flags);
683
684 parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
685
686 /* Look up that selector's parent array index and return it */
687 index = parent_index(sel, parent_sel);
688 if (index == BAD_CLK_INDEX)
689 pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n",
690 __func__, parent_sel, ccu->name, sel->offset);
691
692 return index;
693}
694
695/*
696 * Commit our desired selector value to the hardware.
697 *
698 * Returns 0 on success. Returns -EINVAL for invalid arguments.
699 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
700 */
701static int
702__sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
703 struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
704{
705 u32 parent_sel;
706 u32 reg_val;
707 bool enabled;
708 int ret = 0;
709
710 BUG_ON(!selector_exists(sel));
711
712 /*
713 * If we're just initializing the selector, and no initial
714 * state was defined in the device tree, we just find out
715 * what its current value is rather than updating it.
716 */
717 if (sel->clk_index == BAD_CLK_INDEX) {
718 u8 index;
719
720 reg_val = __ccu_read(ccu, sel->offset);
721 parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
722 index = parent_index(sel, parent_sel);
723 if (index == BAD_CLK_INDEX)
724 return -EINVAL;
725 sel->clk_index = index;
726
727 return 0;
728 }
729
730 BUG_ON((u32)sel->clk_index >= sel->parent_count);
731 parent_sel = sel->parent_sel[sel->clk_index];
732
733 /* Clock needs to be enabled before changing the parent */
734 enabled = __is_clk_gate_enabled(ccu, gate);
735 if (!enabled && !__clk_gate(ccu, gate, true))
736 return -ENXIO;
737
738 /* Replace the selector value and record the result */
739 reg_val = __ccu_read(ccu, sel->offset);
740 reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel);
741 __ccu_write(ccu, sel->offset, reg_val);
742
743 /* If the trigger fails we still want to disable the gate */
744 if (!__clk_trigger(ccu, trig))
745 ret = -EIO;
746
747 /* Disable the clock again if it was disabled to begin with */
748 if (!enabled && !__clk_gate(ccu, gate, false))
749 ret = ret ? ret : -ENXIO; /* return first error */
750
751 return ret;
752}
753
754/*
755 * Initialize a selector by committing our desired state to hardware
756 * without the usual checks to see if it's already set up that way.
757 * Returns true if successful, false otherwise.
758 */
759static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
760 struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
761{
762 if (!selector_exists(sel))
763 return true;
764 return !__sel_commit(ccu, gate, sel, trig);
765}
766
767/*
768 * Write a new value into a selector register to switch to a
769 * different parent clock. Returns 0 on success, or an error code
770 * (from __sel_commit()) otherwise.
771 */
772static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
773 struct bcm_clk_sel *sel, struct bcm_clk_trig *trig,
774 u8 index)
775{
776 unsigned long flags;
777 u8 previous;
778 int ret;
779
780 previous = sel->clk_index;
781 if (previous == index)
782 return 0; /* No change */
783
784 sel->clk_index = index;
785
786 flags = ccu_lock(ccu);
787 __ccu_write_enable(ccu);
788
789 ret = __sel_commit(ccu, gate, sel, trig);
790
791 __ccu_write_disable(ccu);
792 ccu_unlock(ccu, flags);
793
794 if (ret)
795 sel->clk_index = previous; /* Revert the change */
796
797 return ret;
798}
799
800/* Clock operations */
801
802static int kona_peri_clk_enable(struct clk_hw *hw)
803{
804 struct kona_clk *bcm_clk = to_kona_clk(hw);
805 struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
806
807 return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true);
808}
809
810static void kona_peri_clk_disable(struct clk_hw *hw)
811{
812 struct kona_clk *bcm_clk = to_kona_clk(hw);
813 struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
814
815 (void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false);
816}
817
818static int kona_peri_clk_is_enabled(struct clk_hw *hw)
819{
820 struct kona_clk *bcm_clk = to_kona_clk(hw);
821 struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
822
823 return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
824}
825
826static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
827 unsigned long parent_rate)
828{
829 struct kona_clk *bcm_clk = to_kona_clk(hw);
830 struct peri_clk_data *data = bcm_clk->peri;
831
832 return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
833 parent_rate);
834}
835
836static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
837 unsigned long *parent_rate)
838{
839 struct kona_clk *bcm_clk = to_kona_clk(hw);
840 struct bcm_clk_div *div = &bcm_clk->peri->div;
841
842 if (!divider_exists(div))
843 return __clk_get_rate(hw->clk);
844
845 /* Quietly avoid a zero rate */
846 return round_rate(bcm_clk->ccu, div, &bcm_clk->peri->pre_div,
847 rate ? rate : 1, *parent_rate, NULL);
848}
849
850static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
851{
852 struct kona_clk *bcm_clk = to_kona_clk(hw);
853 struct peri_clk_data *data = bcm_clk->peri;
854 struct bcm_clk_sel *sel = &data->sel;
855 struct bcm_clk_trig *trig;
856 int ret;
857
858 BUG_ON(index >= sel->parent_count);
859
860 /* If there's only one parent we don't require a selector */
861 if (!selector_exists(sel))
862 return 0;
863
864 /*
865 * The regular trigger is used by default, but if there's a
866 * pre-trigger we want to use that instead.
867 */
868 trig = trigger_exists(&data->pre_trig) ? &data->pre_trig
869 : &data->trig;
870
871 ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index);
872 if (ret == -ENXIO) {
873 pr_err("%s: gating failure for %s\n", __func__, bcm_clk->name);
874 ret = -EIO; /* Don't proliferate weird errors */
875 } else if (ret == -EIO) {
876 pr_err("%s: %strigger failed for %s\n", __func__,
877 trig == &data->pre_trig ? "pre-" : "",
878 bcm_clk->name);
879 }
880
881 return ret;
882}
883
884static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
885{
886 struct kona_clk *bcm_clk = to_kona_clk(hw);
887 struct peri_clk_data *data = bcm_clk->peri;
888 u8 index;
889
890 index = selector_read_index(bcm_clk->ccu, &data->sel);
891
892 /* Not all callers would handle an out-of-range value gracefully */
893 return index == BAD_CLK_INDEX ? 0 : index;
894}
895
896static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
897 unsigned long parent_rate)
898{
899 struct kona_clk *bcm_clk = to_kona_clk(hw);
900 struct peri_clk_data *data = bcm_clk->peri;
901 struct bcm_clk_div *div = &data->div;
902 u64 scaled_div = 0;
903 int ret;
904
905 if (parent_rate > (unsigned long)LONG_MAX)
906 return -EINVAL;
907
908 if (rate == __clk_get_rate(hw->clk))
909 return 0;
910
911 if (!divider_exists(div))
912 return rate == parent_rate ? 0 : -EINVAL;
913
914 /*
915 * A fixed divider can't be changed. (Nor can a fixed
916 * pre-divider be, but for now we never actually try to
917 * change that.) Tolerate a request for a no-op change.
918 */
919 if (divider_is_fixed(&data->div))
920 return rate == parent_rate ? 0 : -EINVAL;
921
922 /*
923 * Get the scaled divisor value needed to achieve a clock
924 * rate as close as possible to what was requested, given
925 * the parent clock rate supplied.
926 */
927 (void)round_rate(bcm_clk->ccu, div, &data->pre_div,
928 rate ? rate : 1, parent_rate, &scaled_div);
929
930 /*
931 * We aren't updating any pre-divider at this point, so
932 * we'll use the regular trigger.
933 */
934 ret = divider_write(bcm_clk->ccu, &data->gate, &data->div,
935 &data->trig, scaled_div);
936 if (ret == -ENXIO) {
937 pr_err("%s: gating failure for %s\n", __func__, bcm_clk->name);
938 ret = -EIO; /* Don't proliferate weird errors */
939 } else if (ret == -EIO) {
940 pr_err("%s: trigger failed for %s\n", __func__, bcm_clk->name);
941 }
942
943 return ret;
944}
945
946struct clk_ops kona_peri_clk_ops = {
947 .enable = kona_peri_clk_enable,
948 .disable = kona_peri_clk_disable,
949 .is_enabled = kona_peri_clk_is_enabled,
950 .recalc_rate = kona_peri_clk_recalc_rate,
951 .round_rate = kona_peri_clk_round_rate,
952 .set_parent = kona_peri_clk_set_parent,
953 .get_parent = kona_peri_clk_get_parent,
954 .set_rate = kona_peri_clk_set_rate,
955};
956
957/* Put a peripheral clock into its initial state */
958static bool __peri_clk_init(struct kona_clk *bcm_clk)
959{
960 struct ccu_data *ccu = bcm_clk->ccu;
961 struct peri_clk_data *peri = bcm_clk->peri;
962 const char *name = bcm_clk->name;
963 struct bcm_clk_trig *trig;
964
965 BUG_ON(bcm_clk->type != bcm_clk_peri);
966
967 if (!gate_init(ccu, &peri->gate)) {
968 pr_err("%s: error initializing gate for %s\n", __func__, name);
969 return false;
970 }
971 if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) {
972 pr_err("%s: error initializing divider for %s\n", __func__,
973 name);
974 return false;
975 }
976
977 /*
978 * For the pre-divider and selector, the pre-trigger is used
979 * if it's present, otherwise we just use the regular trigger.
980 */
981 trig = trigger_exists(&peri->pre_trig) ? &peri->pre_trig
982 : &peri->trig;
983
984 if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) {
985 pr_err("%s: error initializing pre-divider for %s\n", __func__,
986 name);
987 return false;
988 }
989
990 if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) {
991 pr_err("%s: error initializing selector for %s\n", __func__,
992 name);
993 return false;
994 }
995
996 return true;
997}
998
999static bool __kona_clk_init(struct kona_clk *bcm_clk)
1000{
1001 switch (bcm_clk->type) {
1002 case bcm_clk_peri:
1003 return __peri_clk_init(bcm_clk);
1004 default:
1005 BUG();
1006 }
1007	return false;		/* unreachable: BUG() does not return */

1008}
1009
1010/* Set a CCU and all its clocks into their desired initial state */
1011bool __init kona_ccu_init(struct ccu_data *ccu)
1012{
1013 unsigned long flags;
1014 unsigned int which;
1015 struct clk **clks = ccu->data.clks;
1016 bool success = true;
1017
1018 flags = ccu_lock(ccu);
1019 __ccu_write_enable(ccu);
1020
1021 for (which = 0; which < ccu->data.clk_num; which++) {
1022 struct kona_clk *bcm_clk;
1023
1024 if (!clks[which])
1025 continue;
1026 bcm_clk = to_kona_clk(__clk_get_hw(clks[which]));
1027 success &= __kona_clk_init(bcm_clk);
1028 }
1029
1030 __ccu_write_disable(ccu);
1031 ccu_unlock(ccu, flags);
1032 return success;
1033}
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h
new file mode 100644
index 000000000000..5e139adc3dc5
--- /dev/null
+++ b/drivers/clk/bcm/clk-kona.h
@@ -0,0 +1,410 @@
1/*
2 * Copyright (C) 2013 Broadcom Corporation
3 * Copyright 2013 Linaro Limited
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation version 2.
8 *
9 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
10 * kind, whether express or implied; without even the implied warranty
11 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _CLK_KONA_H
16#define _CLK_KONA_H
17
18#include <linux/kernel.h>
19#include <linux/list.h>
20#include <linux/spinlock.h>
21#include <linux/slab.h>
22#include <linux/device.h>
23#include <linux/of.h>
24#include <linux/clk-provider.h>
25
26#define BILLION 1000000000
27
28/* The common clock framework uses u8 to represent a parent index */
29#define PARENT_COUNT_MAX ((u32)U8_MAX)
30
31#define BAD_CLK_INDEX U8_MAX /* Can't ever be valid */
32#define BAD_CLK_NAME ((const char *)-1)
33
34#define BAD_SCALED_DIV_VALUE U64_MAX
35
36/*
37 * Utility macros for object flag management. If possible, flags
38 * should be defined such that 0 is the desired default value.
39 */
40#define FLAG(type, flag) BCM_CLK_ ## type ## _FLAGS_ ## flag
41#define FLAG_SET(obj, type, flag) ((obj)->flags |= FLAG(type, flag))
42#define FLAG_CLEAR(obj, type, flag) ((obj)->flags &= ~(FLAG(type, flag)))
43#define FLAG_FLIP(obj, type, flag) ((obj)->flags ^= FLAG(type, flag))
44#define FLAG_TEST(obj, type, flag) (!!((obj)->flags & FLAG(type, flag)))
45
46/* Clock field state tests */
47
48#define gate_exists(gate) FLAG_TEST(gate, GATE, EXISTS)
49#define gate_is_enabled(gate) FLAG_TEST(gate, GATE, ENABLED)
50#define gate_is_hw_controllable(gate) FLAG_TEST(gate, GATE, HW)
51#define gate_is_sw_controllable(gate) FLAG_TEST(gate, GATE, SW)
52#define gate_is_sw_managed(gate) FLAG_TEST(gate, GATE, SW_MANAGED)
53#define gate_is_no_disable(gate) FLAG_TEST(gate, GATE, NO_DISABLE)
54
55#define gate_flip_enabled(gate) FLAG_FLIP(gate, GATE, ENABLED)
56
57#define divider_exists(div) FLAG_TEST(div, DIV, EXISTS)
58#define divider_is_fixed(div) FLAG_TEST(div, DIV, FIXED)
59#define divider_has_fraction(div) (!divider_is_fixed(div) && \
60 (div)->frac_width > 0)
61
62#define selector_exists(sel) ((sel)->width != 0)
63#define trigger_exists(trig) FLAG_TEST(trig, TRIG, EXISTS)
64
65/* Clock type, used to tell the common code what kind of clock this is */
66enum bcm_clk_type {
67 bcm_clk_none, /* undefined clock type */
68 bcm_clk_bus,
69 bcm_clk_core,
70 bcm_clk_peri
71};
72
73/*
74 * Each CCU defines a mapped area of memory containing registers
75 * used to manage clocks implemented by the CCU. Access to memory
76 * within the CCU's space is serialized by a spinlock. Before any
77 * (other) address can be written, a special access "password" value
78 * must be written to its WR_ACCESS register (located at the base
79 * address of the range). We keep track of the name of each CCU as
80 * it is set up, and maintain them in a list.
81 */
82struct ccu_data {
83 void __iomem *base; /* base of mapped address space */
84 spinlock_t lock; /* serialization lock */
85 bool write_enabled; /* write access is currently enabled */
86 struct list_head links; /* for ccu_list */
87 struct device_node *node;
88 struct clk_onecell_data data;
89 const char *name;
90 u32 range; /* byte range of address space */
91};
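In sketch form, the "password" handshake described above looks like this (the 0xA5A500 value and the bit-0 enable follow the Kona driver's convention but should be treated as illustrative; locking and enable-count nesting are omitted):

	static inline void ccu_write_enable_sketch(struct ccu_data *ccu)
	{
		/* WR_ACCESS lives at offset 0; password in the upper bits */
		writel(0xA5A500 | 1, ccu->base);	/* unlock writes */
		ccu->write_enabled = true;
	}
	/* ...perform register writes, then write the bare password to re-lock */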
92
93/*
94 * Gating control and status is managed by a 32-bit gate register.
95 *
96 * There are several types of gating available:
97 * - (no gate)
98 * A clock with no gate is assumed to be always enabled.
99 * - hardware-only gating (auto-gating)
100 * Enabling or disabling clocks with this type of gate is
101 * managed automatically by the hardware. Such clocks can be
102 * considered by the software to be enabled. The current status
103 * of auto-gated clocks can be read from the gate status bit.
104 * - software-only gating
105 * Auto-gating is not available for this type of clock.
106 * Instead, software manages whether it's enabled by setting or
107 * clearing the enable bit. The current gate status of a gate
108 * under software control can be read from the gate status bit.
109 * To ensure a change to the gating status is complete, the
110 * status bit can be polled to verify that the gate has entered
111 * the desired state.
112 * - selectable hardware or software gating
113 * Gating for this type of clock can be configured to be either
114 * under software or hardware control. Which type is in use is
115 * determined by the hw_sw_sel bit of the gate register.
116 */
117struct bcm_clk_gate {
118 u32 offset; /* gate register offset */
119	u32 status_bit;		/* 0: gate is disabled; 1: gate is enabled */
120 u32 en_bit; /* 0: disable; 1: enable */
121 u32 hw_sw_sel_bit; /* 0: hardware gating; 1: software gating */
122 u32 flags; /* BCM_CLK_GATE_FLAGS_* below */
123};
124
125/*
126 * Gate flags:
127 * HW means this gate can be auto-gated
128 * SW means the state of this gate can be software controlled
129 * NO_DISABLE means this gate may never be disabled; under software control it is always enabled
130 * SW_MANAGED means the status of this gate is under software control
131 * ENABLED means this software-managed gate is *supposed* to be enabled
132 */
133#define BCM_CLK_GATE_FLAGS_EXISTS ((u32)1 << 0) /* Gate is valid */
134#define BCM_CLK_GATE_FLAGS_HW ((u32)1 << 1) /* Can auto-gate */
135#define BCM_CLK_GATE_FLAGS_SW ((u32)1 << 2) /* Software control */
136#define BCM_CLK_GATE_FLAGS_NO_DISABLE ((u32)1 << 3) /* HW or enabled */
137#define BCM_CLK_GATE_FLAGS_SW_MANAGED ((u32)1 << 4) /* SW now in control */
138#define BCM_CLK_GATE_FLAGS_ENABLED ((u32)1 << 5) /* If SW_MANAGED */
139
140/*
141 * Gate initialization macros.
142 *
143 * Any gate initially under software control will be enabled.
144 */
145
146/* A hardware/software gate initially under software control */
147#define HW_SW_GATE(_offset, _status_bit, _en_bit, _hw_sw_sel_bit) \
148 { \
149 .offset = (_offset), \
150 .status_bit = (_status_bit), \
151 .en_bit = (_en_bit), \
152 .hw_sw_sel_bit = (_hw_sw_sel_bit), \
153 .flags = FLAG(GATE, HW)|FLAG(GATE, SW)| \
154 FLAG(GATE, SW_MANAGED)|FLAG(GATE, ENABLED)| \
155 FLAG(GATE, EXISTS), \
156 }
157
158/* A hardware/software gate initially under hardware control */
159#define HW_SW_GATE_AUTO(_offset, _status_bit, _en_bit, _hw_sw_sel_bit) \
160 { \
161 .offset = (_offset), \
162 .status_bit = (_status_bit), \
163 .en_bit = (_en_bit), \
164 .hw_sw_sel_bit = (_hw_sw_sel_bit), \
165 .flags = FLAG(GATE, HW)|FLAG(GATE, SW)| \
166 FLAG(GATE, EXISTS), \
167 }
168
169/* A hardware-or-enabled gate (enabled if not under hardware control) */
170#define HW_ENABLE_GATE(_offset, _status_bit, _en_bit, _hw_sw_sel_bit) \
171 { \
172 .offset = (_offset), \
173 .status_bit = (_status_bit), \
174 .en_bit = (_en_bit), \
175 .hw_sw_sel_bit = (_hw_sw_sel_bit), \
176 .flags = FLAG(GATE, HW)|FLAG(GATE, SW)| \
177 FLAG(GATE, NO_DISABLE)|FLAG(GATE, EXISTS), \
178 }
179
180/* A software-only gate */
181#define SW_ONLY_GATE(_offset, _status_bit, _en_bit) \
182 { \
183 .offset = (_offset), \
184 .status_bit = (_status_bit), \
185 .en_bit = (_en_bit), \
186 .flags = FLAG(GATE, SW)|FLAG(GATE, SW_MANAGED)| \
187 FLAG(GATE, ENABLED)|FLAG(GATE, EXISTS), \
188 }
189
190/* A hardware-only gate */
191#define HW_ONLY_GATE(_offset, _status_bit) \
192 { \
193 .offset = (_offset), \
194 .status_bit = (_status_bit), \
195 .flags = FLAG(GATE, HW)|FLAG(GATE, EXISTS), \
196 }
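A rough sketch of the software-managed enable sequence these flags describe (error handling, locking, and a bounded wait are omitted; this is not the driver's actual helper):

	static bool gate_enable_sketch(struct ccu_data *ccu, struct bcm_clk_gate *gate)
	{
		u32 reg_val = readl(ccu->base + gate->offset);

		/* Claim software control where hardware gating is also possible */
		if (gate_is_hw_controllable(gate))
			reg_val |= 1 << gate->hw_sw_sel_bit;
		reg_val |= 1 << gate->en_bit;		/* request the enable */
		writel(reg_val, ccu->base + gate->offset);

		/* Poll the status bit until the gate reports it is enabled */
		while (!(readl(ccu->base + gate->offset) & (1 << gate->status_bit)))
			cpu_relax();

		return true;
	}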
197
198/*
199 * Each clock can have zero, one, or two dividers which change the
200 * output rate of the clock. Each divider can be either fixed or
201 * variable. If there are two dividers, they are the "pre-divider"
202 * and the "regular" or "downstream" divider. If there is only one,
203 * there is no pre-divider.
204 *
205 * A fixed divider is any non-zero (positive) value, and it
206 * indicates how the input rate is affected by the divider.
207 *
208 * The value of a variable divider is maintained in a sub-field of a
209 * 32-bit divider register. The position of the field in the
210 * register is defined by its offset and width. The value recorded
211 * in this field is always 1 less than the value it represents.
212 *
213 * In addition, a variable divider can indicate that some subset
214 * of its bits represent a "fractional" part of the divider. Such
215 * bits comprise the low-order portion of the divider field, and can
216 * be viewed as representing the portion of the divider that lies to
217 * the right of the decimal point. Most variable dividers have zero
218 * fractional bits. Variable dividers with non-zero fraction width
219 * still record a value 1 less than the value they represent; the
220 * added 1 does *not* affect the low-order bit in this case, it
221 * affects the bits above the fractional part only. (Often in this
222 * code a divider field value is distinguished from the value it
223 * represents by referring to the latter as a "divisor".)
224 *
225 * In order to avoid dealing with fractions, divider arithmetic is
226 * performed using "scaled" values. A scaled value is one that's
227 * been left-shifted by the fractional width of a divider. Dividing
228 * a scaled value by a scaled divisor produces the desired quotient
229 * without loss of precision and without any other special handling
230 * for fractions.
231 *
232 * The recorded value of a variable divider can be modified. To
233 * modify either divider (or both), a clock must be enabled (i.e.,
234 * using its gate). In addition, a trigger register (described
235 * below) must be used to commit the change, and polled to verify
236 * the change is complete.
237 */
238struct bcm_clk_div {
239 union {
240 struct { /* variable divider */
241 u32 offset; /* divider register offset */
242 u32 shift; /* field shift */
243 u32 width; /* field width */
244 u32 frac_width; /* field fraction width */
245
246 u64 scaled_div; /* scaled divider value */
247 };
248 u32 fixed; /* non-zero fixed divider value */
249 };
250 u32 flags; /* BCM_CLK_DIV_FLAGS_* below */
251};
252
253/*
254 * Divider flags:
255 * EXISTS means this divider exists
256 * FIXED means it is a fixed-rate divider
257 */
258#define BCM_CLK_DIV_FLAGS_EXISTS ((u32)1 << 0) /* Divider is valid */
259#define BCM_CLK_DIV_FLAGS_FIXED ((u32)1 << 1) /* Fixed-value */
260
261/* Divider initialization macros */
262
263/* A fixed (non-zero) divider */
264#define FIXED_DIVIDER(_value) \
265 { \
266 .fixed = (_value), \
267 .flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED), \
268 }
269
270/* A divider with an integral divisor */
271#define DIVIDER(_offset, _shift, _width) \
272 { \
273 .offset = (_offset), \
274 .shift = (_shift), \
275 .width = (_width), \
276 .scaled_div = BAD_SCALED_DIV_VALUE, \
277 .flags = FLAG(DIV, EXISTS), \
278 }
279
280/* A divider whose divisor has an integer and fractional part */
281#define FRAC_DIVIDER(_offset, _shift, _width, _frac_width) \
282 { \
283 .offset = (_offset), \
284 .shift = (_shift), \
285 .width = (_width), \
286 .frac_width = (_frac_width), \
287 .scaled_div = BAD_SCALED_DIV_VALUE, \
288 .flags = FLAG(DIV, EXISTS), \
289 }
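A worked example of this encoding (field sizes invented): for FRAC_DIVIDER(offset, shift, 8, 3), a divisor of 4.5 scales to 4.5 * 2^3 = 36, and the field records one whole step less, 36 - (1 << 3) = 28. Reading back: scaled divisor = 28 + 8 = 36, so the represented divisor is 36 / 8 = 4.5.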
290
291/*
292 * Clocks may have multiple "parent" clocks. If there is more than
293 * one, a selector must be specified to define which of the parent
294 * clocks is currently in use. The selected clock is indicated in a
295 * sub-field of a 32-bit selector register. The range of
296 * representable selector values typically exceeds the number of
297 * available parent clocks. Occasionally the reset value of a
298 * selector field is explicitly set to a (specific) value that does
299 * not correspond to a defined input clock.
300 *
301 * We register all known parent clocks with the common clock code
302 * using a packed array (i.e., no empty slots) of (parent) clock
303 * names, and refer to them later using indexes into that array.
304 * We maintain an array of selector values indexed by common clock
305 * index values in order to map between these common clock indexes
306 * and the selector values used by the hardware.
307 *
308 * Like dividers, a selector can be modified, but to do so a clock
309 * must be enabled, and a trigger must be used to commit the change.
310 */
311struct bcm_clk_sel {
312 u32 offset; /* selector register offset */
313 u32 shift; /* field shift */
314 u32 width; /* field width */
315
316 u32 parent_count; /* number of entries in parent_sel[] */
317 u32 *parent_sel; /* array of parent selector values */
318 u8 clk_index; /* current selected index in parent_sel[] */
319};
320
321/* Selector initialization macro */
322#define SELECTOR(_offset, _shift, _width) \
323 { \
324 .offset = (_offset), \
325 .shift = (_shift), \
326 .width = (_width), \
327 .clk_index = BAD_CLK_INDEX, \
328 }
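For illustration (values invented): with parent_sel[] = { 0, 2, 3 }, the clock framework's parent index 1 selects hardware selector value 2, and parent_index() performs the reverse lookup; a reset-default selector value such as 7, which matches no entry, yields BAD_CLK_INDEX.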
329
330/*
331 * Making changes to a variable divider or a selector for a clock
332 * requires the use of a trigger. A trigger is defined by a single
333 * bit within a register. To signal a change, a 1 is written into
334 * that bit. To determine when the change has been completed, that
335 * trigger bit is polled; the read value will be 1 while the change
336 * is in progress, and 0 when it is complete.
337 *
338 * Occasionally a clock will have more than one trigger. In this
339 * case, the "pre-trigger" will be used when changing a clock's
340 * selector and/or its pre-divider.
341 */
342struct bcm_clk_trig {
343 u32 offset; /* trigger register offset */
344 u32 bit; /* trigger bit */
345 u32 flags; /* BCM_CLK_TRIG_FLAGS_* below */
346};
347
348/*
349 * Trigger flags:
350 * EXISTS means this trigger exists
351 */
352#define BCM_CLK_TRIG_FLAGS_EXISTS ((u32)1 << 0) /* Trigger is valid */
353
354/* Trigger initialization macro */
355#define TRIGGER(_offset, _bit) \
356 { \
357 .offset = (_offset), \
358 .bit = (_bit), \
359 .flags = FLAG(TRIG, EXISTS), \
360 }
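In sketch form the commit sequence is (cf. __clk_trigger() earlier in this patch, which additionally bounds the wait):

	writel(1 << trig->bit, ccu->base + trig->offset);	/* kick the change */
	while (readl(ccu->base + trig->offset) & (1 << trig->bit))
		cpu_relax();					/* still in progress */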
361
362struct peri_clk_data {
363 struct bcm_clk_gate gate;
364 struct bcm_clk_trig pre_trig;
365 struct bcm_clk_div pre_div;
366 struct bcm_clk_trig trig;
367 struct bcm_clk_div div;
368 struct bcm_clk_sel sel;
369 const char *clocks[]; /* must be last; use CLOCKS() to declare */
370};
371#define CLOCKS(...) { __VA_ARGS__, NULL, }
372#define NO_CLOCKS	{ NULL, }	/* Use this when there are no parent clocks */
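For orientation, a hypothetical peripheral clock built from these initializers (every offset, bit position, and parent name below is invented, not taken from a real CCU):

	static struct peri_clk_data sketch_uart_data = {
		.gate	= HW_SW_GATE(0x0400, 18, 2, 3),
		.sel	= SELECTOR(0x0a10, 0, 2),
		.div	= FRAC_DIVIDER(0x0a10, 4, 12, 8),
		.trig	= TRIGGER(0x0afc, 2),
		.clocks	= CLOCKS("ref_crystal", "var_52m"),
	};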
373
374struct kona_clk {
375 struct clk_hw hw;
376 struct clk_init_data init_data;
377 const char *name; /* name of this clock */
378 struct ccu_data *ccu; /* ccu this clock is associated with */
379 enum bcm_clk_type type;
380 union {
381 void *data;
382 struct peri_clk_data *peri;
383 };
384};
385#define to_kona_clk(_hw) \
386 container_of(_hw, struct kona_clk, hw)
387
388/* Exported globals */
389
390extern struct clk_ops kona_peri_clk_ops;
391
392/* Helper macros */
393
394#define PERI_CLK_SETUP(clks, ccu, id, name) \
395 clks[id] = kona_clk_setup(ccu, #name, bcm_clk_peri, &name ## _data)
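Hypothetical usage (the index constant is invented): with the sketch_uart_data example above renamed to uart_data, a CCU's clock-setup function would write

	PERI_CLK_SETUP(clks, ccu, SKETCH_UART_CLK_ID, uart);

which expands to clks[SKETCH_UART_CLK_ID] = kona_clk_setup(ccu, "uart", bcm_clk_peri, &uart_data).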
396
397/* Externally visible functions */
398
399extern u64 do_div_round_closest(u64 dividend, unsigned long divisor);
400extern u64 scaled_div_max(struct bcm_clk_div *div);
401extern u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value,
402 u32 billionths);
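A sketch of what do_div_round_closest() computes; the real definition lives in clk-kona.c, but round-to-nearest division is the standard bias-then-divide form:

	static u64 do_div_round_closest_sketch(u64 dividend, unsigned long divisor)
	{
		u64 result = dividend + divisor / 2;	/* bias for rounding */

		(void)do_div(result, divisor);	/* do_div() divides in place */
		return result;
	}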
403
404extern struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name,
405 enum bcm_clk_type type, void *data);
406extern void __init kona_dt_ccu_setup(struct device_node *node,
407 int (*ccu_clks_setup)(struct ccu_data *));
408extern bool __init kona_ccu_init(struct ccu_data *ccu);
409
410#endif /* _CLK_KONA_H */
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index 8cbfcf88fae3..a820b0cfcf57 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -33,7 +33,7 @@ struct clk_icst {
33 struct clk_hw hw; 33 struct clk_hw hw;
34 void __iomem *vcoreg; 34 void __iomem *vcoreg;
35 void __iomem *lockreg; 35 void __iomem *lockreg;
36 const struct icst_params *params; 36 struct icst_params *params;
37 unsigned long rate; 37 unsigned long rate;
38}; 38};
39 39
@@ -84,6 +84,8 @@ static unsigned long icst_recalc_rate(struct clk_hw *hw,
84 struct clk_icst *icst = to_icst(hw); 84 struct clk_icst *icst = to_icst(hw);
85 struct icst_vco vco; 85 struct icst_vco vco;
86 86
87 if (parent_rate)
88 icst->params->ref = parent_rate;
87 vco = vco_get(icst->vcoreg); 89 vco = vco_get(icst->vcoreg);
88 icst->rate = icst_hz(icst->params, vco); 90 icst->rate = icst_hz(icst->params, vco);
89 return icst->rate; 91 return icst->rate;
@@ -105,6 +107,8 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
105 struct clk_icst *icst = to_icst(hw); 107 struct clk_icst *icst = to_icst(hw);
106 struct icst_vco vco; 108 struct icst_vco vco;
107 109
110 if (parent_rate)
111 icst->params->ref = parent_rate;
108 vco = icst_hz_to_vco(icst->params, rate); 112 vco = icst_hz_to_vco(icst->params, rate);
109 icst->rate = icst_hz(icst->params, vco); 113 icst->rate = icst_hz(icst->params, vco);
110 vco_set(icst->lockreg, icst->vcoreg, vco); 114 vco_set(icst->lockreg, icst->vcoreg, vco);
@@ -120,24 +124,33 @@ static const struct clk_ops icst_ops = {
120struct clk *icst_clk_register(struct device *dev, 124struct clk *icst_clk_register(struct device *dev,
121 const struct clk_icst_desc *desc, 125 const struct clk_icst_desc *desc,
122 const char *name, 126 const char *name,
127 const char *parent_name,
123 void __iomem *base) 128 void __iomem *base)
124{ 129{
125 struct clk *clk; 130 struct clk *clk;
126 struct clk_icst *icst; 131 struct clk_icst *icst;
127 struct clk_init_data init; 132 struct clk_init_data init;
133 struct icst_params *pclone;
128 134
129 icst = kzalloc(sizeof(struct clk_icst), GFP_KERNEL); 135 icst = kzalloc(sizeof(struct clk_icst), GFP_KERNEL);
130 if (!icst) { 136 if (!icst) {
131 pr_err("could not allocate ICST clock!\n"); 137 pr_err("could not allocate ICST clock!\n");
132 return ERR_PTR(-ENOMEM); 138 return ERR_PTR(-ENOMEM);
133 } 139 }
140
141 pclone = kmemdup(desc->params, sizeof(*pclone), GFP_KERNEL);
142 if (!pclone) {
143 pr_err("could not clone ICST params\n");
144 return ERR_PTR(-ENOMEM);
145 }
146
134 init.name = name; 147 init.name = name;
135 init.ops = &icst_ops; 148 init.ops = &icst_ops;
136 init.flags = CLK_IS_ROOT; 149 init.flags = CLK_IS_ROOT;
137 init.parent_names = NULL; 150 init.parent_names = (parent_name ? &parent_name : NULL);
138 init.num_parents = 0; 151 init.num_parents = (parent_name ? 1 : 0);
139 icst->hw.init = &init; 152 icst->hw.init = &init;
140 icst->params = desc->params; 153 icst->params = pclone;
141 icst->vcoreg = base + desc->vco_offset; 154 icst->vcoreg = base + desc->vco_offset;
142 icst->lockreg = base + desc->lock_offset; 155 icst->lockreg = base + desc->lock_offset;
143 156
diff --git a/drivers/clk/versatile/clk-icst.h b/drivers/clk/versatile/clk-icst.h
index be99dd0da785..04e6f0aef588 100644
--- a/drivers/clk/versatile/clk-icst.h
+++ b/drivers/clk/versatile/clk-icst.h
@@ -16,4 +16,5 @@ struct clk_icst_desc {
16struct clk *icst_clk_register(struct device *dev, 16struct clk *icst_clk_register(struct device *dev,
17 const struct clk_icst_desc *desc, 17 const struct clk_icst_desc *desc,
18 const char *name, 18 const char *name,
19 const char *parent_name,
19 void __iomem *base); 20 void __iomem *base);
diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c
index 844f8d711a12..6d8b8e1a080a 100644
--- a/drivers/clk/versatile/clk-impd1.c
+++ b/drivers/clk/versatile/clk-impd1.c
@@ -93,13 +93,15 @@ void integrator_impd1_clk_init(void __iomem *base, unsigned int id)
93 imc = &impd1_clks[id]; 93 imc = &impd1_clks[id];
94 94
95 imc->vco1name = kasprintf(GFP_KERNEL, "lm%x-vco1", id); 95 imc->vco1name = kasprintf(GFP_KERNEL, "lm%x-vco1", id);
96 clk = icst_clk_register(NULL, &impd1_icst1_desc, imc->vco1name, base); 96 clk = icst_clk_register(NULL, &impd1_icst1_desc, imc->vco1name, NULL,
97 base);
97 imc->vco1clk = clk; 98 imc->vco1clk = clk;
98 imc->clks[0] = clkdev_alloc(clk, NULL, "lm%x:01000", id); 99 imc->clks[0] = clkdev_alloc(clk, NULL, "lm%x:01000", id);
99 100
100 /* VCO2 is also called "CLK2" */ 101 /* VCO2 is also called "CLK2" */
101 imc->vco2name = kasprintf(GFP_KERNEL, "lm%x-vco2", id); 102 imc->vco2name = kasprintf(GFP_KERNEL, "lm%x-vco2", id);
102 clk = icst_clk_register(NULL, &impd1_icst2_desc, imc->vco2name, base); 103 clk = icst_clk_register(NULL, &impd1_icst2_desc, imc->vco2name, NULL,
104 base);
103 imc->vco2clk = clk; 105 imc->vco2clk = clk;
104 106
105 /* MMCI uses CLK2 right off */ 107 /* MMCI uses CLK2 right off */
diff --git a/drivers/clk/versatile/clk-integrator.c b/drivers/clk/versatile/clk-integrator.c
index bda8967e09c2..734c4b8fe6ab 100644
--- a/drivers/clk/versatile/clk-integrator.c
+++ b/drivers/clk/versatile/clk-integrator.c
@@ -10,21 +10,17 @@
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/clkdev.h> 11#include <linux/clkdev.h>
12#include <linux/err.h> 12#include <linux/err.h>
13#include <linux/platform_data/clk-integrator.h> 13#include <linux/of.h>
14 14#include <linux/of_address.h>
15#include <mach/hardware.h>
16#include <mach/platform.h>
17 15
18#include "clk-icst.h" 16#include "clk-icst.h"
19 17
20/* 18#define INTEGRATOR_HDR_LOCK_OFFSET 0x14
21 * Implementation of the ARM Integrator/AP and Integrator/CP clock tree.
22 * Inspired by portions of:
23 * plat-versatile/clock.c and plat-versatile/include/plat/clock.h
24 */
25 19
26static const struct icst_params cp_auxvco_params = { 20/* Base offset for the core module */
27 .ref = 24000000, 21static void __iomem *cm_base;
22
23static const struct icst_params cp_auxosc_params = {
28 .vco_max = ICST525_VCO_MAX_5V, 24 .vco_max = ICST525_VCO_MAX_5V,
29 .vco_min = ICST525_VCO_MIN, 25 .vco_min = ICST525_VCO_MIN,
30 .vd_min = 8, 26 .vd_min = 8,
@@ -35,50 +31,39 @@ static const struct icst_params cp_auxvco_params = {
35 .idx2s = icst525_idx2s, 31 .idx2s = icst525_idx2s,
36}; 32};
37 33
38static const struct clk_icst_desc __initdata cp_icst_desc = { 34static const struct clk_icst_desc __initdata cm_auxosc_desc = {
39 .params = &cp_auxvco_params, 35 .params = &cp_auxosc_params,
40 .vco_offset = 0x1c, 36 .vco_offset = 0x1c,
41 .lock_offset = INTEGRATOR_HDR_LOCK_OFFSET, 37 .lock_offset = INTEGRATOR_HDR_LOCK_OFFSET,
42}; 38};
43 39
44/* 40static void __init of_integrator_cm_osc_setup(struct device_node *np)
45 * integrator_clk_init() - set up the integrator clock tree
46 * @is_cp: pass true if it's the Integrator/CP else AP is assumed
47 */
48void __init integrator_clk_init(bool is_cp)
49{ 41{
50 struct clk *clk; 42 struct clk *clk = ERR_PTR(-EINVAL);
51 43 const char *clk_name = np->name;
52 /* APB clock dummy */ 44 const struct clk_icst_desc *desc = &cm_auxosc_desc;
53 clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0); 45 const char *parent_name;
54 clk_register_clkdev(clk, "apb_pclk", NULL);
55
56 /* UART reference clock */
57 clk = clk_register_fixed_rate(NULL, "uartclk", NULL, CLK_IS_ROOT,
58 14745600);
59 clk_register_clkdev(clk, NULL, "uart0");
60 clk_register_clkdev(clk, NULL, "uart1");
61 if (is_cp)
62 clk_register_clkdev(clk, NULL, "mmci");
63
64 /* 24 MHz clock */
65 clk = clk_register_fixed_rate(NULL, "clk24mhz", NULL, CLK_IS_ROOT,
66 24000000);
67 clk_register_clkdev(clk, NULL, "kmi0");
68 clk_register_clkdev(clk, NULL, "kmi1");
69 if (!is_cp)
70 clk_register_clkdev(clk, NULL, "ap_timer");
71 46
72 if (!is_cp) 47 if (!cm_base) {
73 return; 48 /* Remap the core module base if not done yet */
49 struct device_node *parent;
74 50
75 /* 1 MHz clock */ 51 parent = of_get_parent(np);
76 clk = clk_register_fixed_rate(NULL, "clk1mhz", NULL, CLK_IS_ROOT, 52 if (!np) {
77 1000000); 53 pr_err("no parent on core module clock\n");
78 clk_register_clkdev(clk, NULL, "sp804"); 54 return;
55 }
56 cm_base = of_iomap(parent, 0);
57 if (!cm_base) {
58 pr_err("could not remap core module base\n");
59 return;
60 }
61 }
79 62
80 /* ICST VCO clock used on the Integrator/CP CLCD */ 63 parent_name = of_clk_get_parent_name(np, 0);
81 clk = icst_clk_register(NULL, &cp_icst_desc, "icst", 64 clk = icst_clk_register(NULL, desc, clk_name, parent_name, cm_base);
82 __io_address(INTEGRATOR_HDR_BASE)); 65 if (!IS_ERR(clk))
83 clk_register_clkdev(clk, NULL, "clcd"); 66 of_clk_add_provider(np, of_clk_src_simple_get, clk);
84} 67}
68CLK_OF_DECLARE(integrator_cm_auxosc_clk,
69 "arm,integrator-cm-auxosc", of_integrator_cm_osc_setup);
diff --git a/drivers/clk/versatile/clk-realview.c b/drivers/clk/versatile/clk-realview.c
index 747e7b31117c..c8b523117fb7 100644
--- a/drivers/clk/versatile/clk-realview.c
+++ b/drivers/clk/versatile/clk-realview.c
@@ -85,10 +85,10 @@ void __init realview_clk_init(void __iomem *sysbase, bool is_pb1176)
85 /* ICST VCO clock */ 85 /* ICST VCO clock */
86 if (is_pb1176) 86 if (is_pb1176)
87 clk = icst_clk_register(NULL, &realview_osc0_desc, 87 clk = icst_clk_register(NULL, &realview_osc0_desc,
88 "osc0", sysbase); 88 "osc0", NULL, sysbase);
89 else 89 else
90 clk = icst_clk_register(NULL, &realview_osc4_desc, 90 clk = icst_clk_register(NULL, &realview_osc4_desc,
91 "osc4", sysbase); 91 "osc4", NULL, sysbase);
92 92
93 clk_register_clkdev(clk, NULL, "dev:clcd"); 93 clk_register_clkdev(clk, NULL, "dev:clcd");
94 clk_register_clkdev(clk, NULL, "issp:clcd"); 94 clk_register_clkdev(clk, NULL, "issp:clcd");
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index cd6950fd8caf..6510ec4f45ff 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -140,3 +140,6 @@ config VF_PIT_TIMER
140 bool 140 bool
141 help 141 help
142 Support for Period Interrupt Timer on Freescale Vybrid Family SoCs. 142 Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
143
144config CLKSRC_QCOM
145 bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index c7ca50a9c232..2e0c0cc0a014 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o
32obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o 32obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o
33obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o 33obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
34obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o 34obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o
35obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o
35 36
36obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o 37obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
37obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o 38obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
new file mode 100644
index 000000000000..e807acf4c665
--- /dev/null
+++ b/drivers/clocksource/qcom-timer.c
@@ -0,0 +1,330 @@
1/*
2 *
3 * Copyright (C) 2007 Google, Inc.
4 * Copyright (c) 2009-2012,2014, The Linux Foundation. All rights reserved.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/clocksource.h>
18#include <linux/clockchips.h>
19#include <linux/cpu.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/irq.h>
23#include <linux/io.h>
24#include <linux/of.h>
25#include <linux/of_address.h>
26#include <linux/of_irq.h>
27#include <linux/sched_clock.h>
28
29#define TIMER_MATCH_VAL 0x0000
30#define TIMER_COUNT_VAL 0x0004
31#define TIMER_ENABLE 0x0008
32#define TIMER_ENABLE_CLR_ON_MATCH_EN BIT(1)
33#define TIMER_ENABLE_EN BIT(0)
34#define TIMER_CLEAR 0x000C
35#define DGT_CLK_CTL 0x10
36#define DGT_CLK_CTL_DIV_4 0x3
37#define TIMER_STS_GPT0_CLR_PEND BIT(10)
38
39#define GPT_HZ 32768
40
41#define MSM_DGT_SHIFT 5
42
43static void __iomem *event_base;
44static void __iomem *sts_base;
45
46static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
47{
48 struct clock_event_device *evt = dev_id;
49 /* Stop the timer tick */
50 if (evt->mode == CLOCK_EVT_MODE_ONESHOT) {
51 u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
52 ctrl &= ~TIMER_ENABLE_EN;
53 writel_relaxed(ctrl, event_base + TIMER_ENABLE);
54 }
55 evt->event_handler(evt);
56 return IRQ_HANDLED;
57}
58
59static int msm_timer_set_next_event(unsigned long cycles,
60 struct clock_event_device *evt)
61{
62 u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
63
64 ctrl &= ~TIMER_ENABLE_EN;
65 writel_relaxed(ctrl, event_base + TIMER_ENABLE);
66
67 writel_relaxed(ctrl, event_base + TIMER_CLEAR);
68 writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);
69
70 if (sts_base)
71 while (readl_relaxed(sts_base) & TIMER_STS_GPT0_CLR_PEND)
72 cpu_relax();
73
74 writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
75 return 0;
76}
77
78static void msm_timer_set_mode(enum clock_event_mode mode,
79 struct clock_event_device *evt)
80{
81 u32 ctrl;
82
83 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
84 ctrl &= ~(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN);
85
86 switch (mode) {
87 case CLOCK_EVT_MODE_RESUME:
88 case CLOCK_EVT_MODE_PERIODIC:
89 break;
90 case CLOCK_EVT_MODE_ONESHOT:
91 /* Timer is enabled in set_next_event */
92 break;
93 case CLOCK_EVT_MODE_UNUSED:
94 case CLOCK_EVT_MODE_SHUTDOWN:
95 break;
96 }
97 writel_relaxed(ctrl, event_base + TIMER_ENABLE);
98}
99
100static struct clock_event_device __percpu *msm_evt;
101
102static void __iomem *source_base;
103
104static notrace cycle_t msm_read_timer_count(struct clocksource *cs)
105{
106 return readl_relaxed(source_base + TIMER_COUNT_VAL);
107}
108
109static struct clocksource msm_clocksource = {
110 .name = "dg_timer",
111 .rating = 300,
112 .read = msm_read_timer_count,
113 .mask = CLOCKSOURCE_MASK(32),
114 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
115};
116
117static int msm_timer_irq;
118static int msm_timer_has_ppi;
119
120static int msm_local_timer_setup(struct clock_event_device *evt)
121{
122 int cpu = smp_processor_id();
123 int err;
124
125 evt->irq = msm_timer_irq;
126 evt->name = "msm_timer";
127 evt->features = CLOCK_EVT_FEAT_ONESHOT;
128 evt->rating = 200;
129 evt->set_mode = msm_timer_set_mode;
130 evt->set_next_event = msm_timer_set_next_event;
131 evt->cpumask = cpumask_of(cpu);
132
133 clockevents_config_and_register(evt, GPT_HZ, 4, 0xffffffff);
134
135 if (msm_timer_has_ppi) {
136 enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING);
137 } else {
138 err = request_irq(evt->irq, msm_timer_interrupt,
139 IRQF_TIMER | IRQF_NOBALANCING |
140 IRQF_TRIGGER_RISING, "gp_timer", evt);
141 if (err)
142 pr_err("request_irq failed\n");
143 }
144
145 return 0;
146}
147
148static void msm_local_timer_stop(struct clock_event_device *evt)
149{
150 evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
151 disable_percpu_irq(evt->irq);
152}
153
154static int msm_timer_cpu_notify(struct notifier_block *self,
155 unsigned long action, void *hcpu)
156{
157 /*
158 * Grab cpu pointer in each case to avoid spurious
159 * preemptible warnings
160 */
161 switch (action & ~CPU_TASKS_FROZEN) {
162 case CPU_STARTING:
163 msm_local_timer_setup(this_cpu_ptr(msm_evt));
164 break;
165 case CPU_DYING:
166 msm_local_timer_stop(this_cpu_ptr(msm_evt));
167 break;
168 }
169
170 return NOTIFY_OK;
171}
172
173static struct notifier_block msm_timer_cpu_nb = {
174 .notifier_call = msm_timer_cpu_notify,
175};
176
177static u64 notrace msm_sched_clock_read(void)
178{
179 return msm_clocksource.read(&msm_clocksource);
180}
181
182static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
183 bool percpu)
184{
185 struct clocksource *cs = &msm_clocksource;
186 int res = 0;
187
188 msm_timer_irq = irq;
189 msm_timer_has_ppi = percpu;
190
191 msm_evt = alloc_percpu(struct clock_event_device);
192 if (!msm_evt) {
193 pr_err("memory allocation failed for clockevents\n");
194 goto err;
195 }
196
197 if (percpu)
198 res = request_percpu_irq(irq, msm_timer_interrupt,
199 "gp_timer", msm_evt);
200
201 if (res) {
202 pr_err("request_percpu_irq failed\n");
203 } else {
204 res = register_cpu_notifier(&msm_timer_cpu_nb);
205 if (res) {
206 free_percpu_irq(irq, msm_evt);
207 goto err;
208 }
209
210 /* Immediately configure the timer on the boot CPU */
211 msm_local_timer_setup(__this_cpu_ptr(msm_evt));
212 }
213
214err:
215 writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
216 res = clocksource_register_hz(cs, dgt_hz);
217 if (res)
218 pr_err("clocksource_register failed\n");
219 sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
220}
221
222#ifdef CONFIG_ARCH_QCOM
223static void __init msm_dt_timer_init(struct device_node *np)
224{
225 u32 freq;
226 int irq;
227 struct resource res;
228 u32 percpu_offset;
229 void __iomem *base;
230 void __iomem *cpu0_base;
231
232 base = of_iomap(np, 0);
233 if (!base) {
234 pr_err("Failed to map event base\n");
235 return;
236 }
237
238 /* We use GPT0 for the clockevent */
239 irq = irq_of_parse_and_map(np, 1);
240 if (irq <= 0) {
241 pr_err("Can't get irq\n");
242 return;
243 }
244
245 /* We use CPU0's DGT for the clocksource */
246 if (of_property_read_u32(np, "cpu-offset", &percpu_offset))
247 percpu_offset = 0;
248
249 if (of_address_to_resource(np, 0, &res)) {
250 pr_err("Failed to parse DGT resource\n");
251 return;
252 }
253
254 cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res));
255 if (!cpu0_base) {
256 pr_err("Failed to map source base\n");
257 return;
258 }
259
260 if (of_property_read_u32(np, "clock-frequency", &freq)) {
261 pr_err("Unknown frequency\n");
262 return;
263 }
264
265 event_base = base + 0x4;
266 sts_base = base + 0x88;
267 source_base = cpu0_base + 0x24;
268 freq /= 4;
269 writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);
270
271 msm_timer_init(freq, 32, irq, !!percpu_offset);
272}
273CLOCKSOURCE_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
274CLOCKSOURCE_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
275#else
276
277static int __init msm_timer_map(phys_addr_t addr, u32 event, u32 source,
278 u32 sts)
279{
280 void __iomem *base;
281
282 base = ioremap(addr, SZ_256);
283 if (!base) {
284 pr_err("Failed to map timer base\n");
285 return -ENOMEM;
286 }
287 event_base = base + event;
288 source_base = base + source;
289 if (sts)
290 sts_base = base + sts;
291
292 return 0;
293}
294
295static notrace cycle_t msm_read_timer_count_shift(struct clocksource *cs)
296{
297 /*
298 * Shift timer count down by a constant due to unreliable lower bits
299 * on some targets.
300 */
301 return msm_read_timer_count(cs) >> MSM_DGT_SHIFT;
302}
303
304void __init msm7x01_timer_init(void)
305{
306 struct clocksource *cs = &msm_clocksource;
307
308 if (msm_timer_map(0xc0100000, 0x0, 0x10, 0x0))
309 return;
310 cs->read = msm_read_timer_count_shift;
311 cs->mask = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT));
312 /* 600 KHz */
313 msm_timer_init(19200000 >> MSM_DGT_SHIFT, 32 - MSM_DGT_SHIFT, 7,
314 false);
315}
316
317void __init msm7x30_timer_init(void)
318{
319 if (msm_timer_map(0xc0100000, 0x4, 0x24, 0x80))
320 return;
321 msm_timer_init(24576000 / 4, 32, 1, false);
322}
323
324void __init qsd8x50_timer_init(void)
325{
326 if (msm_timer_map(0xAC100000, 0x0, 0x10, 0x34))
327 return;
328 msm_timer_init(19200000 / 4, 32, 7, false);
329}
330#endif
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 903f24d28ba0..2c38d950a1e5 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -192,7 +192,7 @@ config GPIO_MSM_V1
192 192
193config GPIO_MSM_V2 193config GPIO_MSM_V2
194 tristate "Qualcomm MSM GPIO v2" 194 tristate "Qualcomm MSM GPIO v2"
195 depends on GPIOLIB && OF && ARCH_MSM 195 depends on GPIOLIB && OF && ARCH_QCOM
196 help 196 help
197 Say yes here to support the GPIO interface on ARM v7 based 197 Say yes here to support the GPIO interface on ARM v7 based
198 Qualcomm MSM chips. Most of the pins on the MSM can be 198 Qualcomm MSM chips. Most of the pins on the MSM can be
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index c69d1e07a3a6..b6984971ce0c 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -3,7 +3,7 @@ config DRM_MSM
3 tristate "MSM DRM" 3 tristate "MSM DRM"
4 depends on DRM 4 depends on DRM
5 depends on MSM_IOMMU 5 depends on MSM_IOMMU
6 depends on (ARCH_MSM && ARCH_MSM8960) || (ARM && COMPILE_TEST) 6 depends on ARCH_MSM8960 || (ARM && COMPILE_TEST)
7 select DRM_KMS_HELPER 7 select DRM_KMS_HELPER
8 select SHMEM 8 select SHMEM
9 select TMPFS 9 select TMPFS
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 61ffdca96e25..111068782da4 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -69,3 +69,11 @@ config VERSATILE_FPGA_IRQ_NR
69config XTENSA_MX 69config XTENSA_MX
70 bool 70 bool
71 select IRQ_DOMAIN 71 select IRQ_DOMAIN
72
73config IRQ_CROSSBAR
74 bool
75 help
76	  Support for a CROSSBAR IP that precedes the main interrupt controller.
77	  The primary irqchip invokes the crossbar's callback, which in turn
78	  allocates a free irq and configures the IP. Thus the peripheral
79	  interrupts are routed to one of the free irqchip interrupt lines.
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 5194afb39e78..cb37e5777f18 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -26,3 +26,4 @@ obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
26obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o 26obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
27obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o 27obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
28obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o 28obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o
29obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
new file mode 100644
index 000000000000..fc817d28d1fe
--- /dev/null
+++ b/drivers/irqchip/irq-crossbar.c
@@ -0,0 +1,208 @@
1/*
2 * drivers/irqchip/irq-crossbar.c
3 *
4 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
5 * Author: Sricharan R <r.sricharan@ti.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12#include <linux/err.h>
13#include <linux/io.h>
14#include <linux/of_address.h>
15#include <linux/of_irq.h>
16#include <linux/slab.h>
17#include <linux/irqchip/arm-gic.h>
18
19#define IRQ_FREE -1
20#define GIC_IRQ_START 32
21
22/*
23 * @int_max: maximum number of supported interrupts
24 * @irq_map: array of interrupts to crossbar number mapping
25 * @crossbar_base: crossbar base address
26 * @register_offsets: offsets for each irq number
27 */
28struct crossbar_device {
29 uint int_max;
30 uint *irq_map;
31 void __iomem *crossbar_base;
32 int *register_offsets;
33 void (*write) (int, int);
34};
35
36static struct crossbar_device *cb;
37
38static inline void crossbar_writel(int irq_no, int cb_no)
39{
40 writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
41}
42
43static inline void crossbar_writew(int irq_no, int cb_no)
44{
45 writew(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
46}
47
48static inline void crossbar_writeb(int irq_no, int cb_no)
49{
50 writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
51}
52
53static inline int allocate_free_irq(int cb_no)
54{
55 int i;
56
57 for (i = 0; i < cb->int_max; i++) {
58 if (cb->irq_map[i] == IRQ_FREE) {
59 cb->irq_map[i] = cb_no;
60 return i;
61 }
62 }
63
64 return -ENODEV;
65}
66
67static int crossbar_domain_map(struct irq_domain *d, unsigned int irq,
68 irq_hw_number_t hw)
69{
70 cb->write(hw - GIC_IRQ_START, cb->irq_map[hw - GIC_IRQ_START]);
71 return 0;
72}
73
74static void crossbar_domain_unmap(struct irq_domain *d, unsigned int irq)
75{
76 irq_hw_number_t hw = irq_get_irq_data(irq)->hwirq;
77
78	if (hw >= GIC_IRQ_START)
79 cb->irq_map[hw - GIC_IRQ_START] = IRQ_FREE;
80}
81
82static int crossbar_domain_xlate(struct irq_domain *d,
83 struct device_node *controller,
84 const u32 *intspec, unsigned int intsize,
85 unsigned long *out_hwirq,
86 unsigned int *out_type)
87{
88 unsigned long ret;
89
90 ret = allocate_free_irq(intspec[1]);
91
92 if (IS_ERR_VALUE(ret))
93 return ret;
94
95 *out_hwirq = ret + GIC_IRQ_START;
96 return 0;
97}
98
99static const struct irq_domain_ops routable_irq_domain_ops = {
100 .map = crossbar_domain_map,
101 .unmap = crossbar_domain_unmap,
102 .xlate = crossbar_domain_xlate
103};
104
105static int __init crossbar_of_init(struct device_node *node)
106{
107 int i, size, max, reserved = 0, entry;
108 const __be32 *irqsr;
109
110	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
111
112 if (!cb)
113 return -ENOMEM;
114
115 cb->crossbar_base = of_iomap(node, 0);
116 if (!cb->crossbar_base)
117 goto err1;
118
119 of_property_read_u32(node, "ti,max-irqs", &max);
120 cb->irq_map = kzalloc(max * sizeof(int), GFP_KERNEL);
121 if (!cb->irq_map)
122 goto err2;
123
124 cb->int_max = max;
125
126 for (i = 0; i < max; i++)
127 cb->irq_map[i] = IRQ_FREE;
128
129 /* Get and mark reserved irqs */
130 irqsr = of_get_property(node, "ti,irqs-reserved", &size);
131 if (irqsr) {
132 size /= sizeof(__be32);
133
134 for (i = 0; i < size; i++) {
135 of_property_read_u32_index(node,
136 "ti,irqs-reserved",
137 i, &entry);
138			if (entry >= max) {
139 pr_err("Invalid reserved entry\n");
140 goto err3;
141 }
142 cb->irq_map[entry] = 0;
143 }
144 }
145
146 cb->register_offsets = kzalloc(max * sizeof(int), GFP_KERNEL);
147 if (!cb->register_offsets)
148 goto err3;
149
150 of_property_read_u32(node, "ti,reg-size", &size);
151
152 switch (size) {
153 case 1:
154 cb->write = crossbar_writeb;
155 break;
156 case 2:
157 cb->write = crossbar_writew;
158 break;
159 case 4:
160 cb->write = crossbar_writel;
161 break;
162 default:
163 pr_err("Invalid reg-size property\n");
164 goto err4;
165 break;
166 }
167
168 /*
169 * Register offsets are not linear because of the
170	 * reserved irqs, so find and store the offsets once.
171 */
172 for (i = 0; i < max; i++) {
173 if (!cb->irq_map[i])
174 continue;
175
176 cb->register_offsets[i] = reserved;
177 reserved += size;
178 }
179
180 register_routable_domain_ops(&routable_irq_domain_ops);
181 return 0;
182
183err4:
184 kfree(cb->register_offsets);
185err3:
186 kfree(cb->irq_map);
187err2:
188 iounmap(cb->crossbar_base);
189err1:
190 kfree(cb);
191 return -ENOMEM;
192}
193
194static const struct of_device_id crossbar_match[] __initconst = {
195 { .compatible = "ti,irq-crossbar" },
196 {}
197};
198
199int __init irqcrossbar_init(void)
200{
201 struct device_node *np;
202 np = of_find_matching_node(NULL, crossbar_match);
203 if (!np)
204 return -ENODEV;
205
206 crossbar_of_init(np);
207 return 0;
208}
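
Two details in crossbar_of_init() are worth seeing in isolation: allocate_free_irq() hands out the first IRQ_FREE slot in irq_map, and the register offsets must be precomputed because reserved lines leave holes in the register file. A minimal standalone C sketch of that bookkeeping (the 8-entry map, the reserved set {1, 4} and the reg_size of 4 are invented demo values, not from any TI binding):

#include <stdio.h>

#define IRQ_FREE -1
#define MAX_IRQS 8		/* stand-in for the "ti,max-irqs" property */

static int irq_map[MAX_IRQS];
static int register_offsets[MAX_IRQS];

/* Mirror of allocate_free_irq(): claim the first free line for cb_no. */
static int allocate_free_irq(int cb_no)
{
	int i;

	for (i = 0; i < MAX_IRQS; i++) {
		if (irq_map[i] == IRQ_FREE) {
			irq_map[i] = cb_no;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	/* Invented reserved lines, as "ti,irqs-reserved" would mark them. */
	int reserved[] = { 1, 4 };
	int reg_size = 4;	/* "ti,reg-size": bytes per routing register */
	int i, off = 0;

	for (i = 0; i < MAX_IRQS; i++)
		irq_map[i] = IRQ_FREE;
	for (i = 0; i < 2; i++)
		irq_map[reserved[i]] = 0;	/* 0 = reserved, unusable */

	/* Offsets are non-linear: reserved lines own no routing register. */
	for (i = 0; i < MAX_IRQS; i++) {
		if (!irq_map[i])
			continue;	/* reserved: offset stays unused */
		register_offsets[i] = off;
		off += reg_size;
	}

	printf("crossbar input 37 -> line %d\n", allocate_free_irq(37));
	printf("crossbar input 42 -> line %d\n", allocate_free_irq(42));
	for (i = 0; i < MAX_IRQS; i++)
		printf("line %d: offset %d\n", i, register_offsets[i]);
	return 0;
}

With those inputs, crossbar input 37 lands on line 0 and input 42 skips the reserved line 1 and lands on line 2, while the offsets run 0, 4, 8, ... with no register consumed by the reserved lines.
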
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 341c6016812d..07a7050841ec 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -824,16 +824,25 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
824 irq_set_chip_and_handler(irq, &gic_chip, 824 irq_set_chip_and_handler(irq, &gic_chip,
825 handle_fasteoi_irq); 825 handle_fasteoi_irq);
826 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 826 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
827
828 gic_routable_irq_domain_ops->map(d, irq, hw);
827 } 829 }
828 irq_set_chip_data(irq, d->host_data); 830 irq_set_chip_data(irq, d->host_data);
829 return 0; 831 return 0;
830} 832}
831 833
834static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
835{
836 gic_routable_irq_domain_ops->unmap(d, irq);
837}
838
832static int gic_irq_domain_xlate(struct irq_domain *d, 839static int gic_irq_domain_xlate(struct irq_domain *d,
833 struct device_node *controller, 840 struct device_node *controller,
834 const u32 *intspec, unsigned int intsize, 841 const u32 *intspec, unsigned int intsize,
835 unsigned long *out_hwirq, unsigned int *out_type) 842 unsigned long *out_hwirq, unsigned int *out_type)
836{ 843{
844 unsigned long ret = 0;
845
837 if (d->of_node != controller) 846 if (d->of_node != controller)
838 return -EINVAL; 847 return -EINVAL;
839 if (intsize < 3) 848 if (intsize < 3)
@@ -843,11 +852,20 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
843 *out_hwirq = intspec[1] + 16; 852 *out_hwirq = intspec[1] + 16;
844 853
845 /* For SPIs, we need to add 16 more to get the GIC irq ID number */ 854 /* For SPIs, we need to add 16 more to get the GIC irq ID number */
846 if (!intspec[0]) 855 if (!intspec[0]) {
847 *out_hwirq += 16; 856 ret = gic_routable_irq_domain_ops->xlate(d, controller,
857 intspec,
858 intsize,
859 out_hwirq,
860 out_type);
861
862 if (IS_ERR_VALUE(ret))
863 return ret;
864 }
848 865
849 *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; 866 *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
850 return 0; 867
868 return ret;
851} 869}
852 870
853#ifdef CONFIG_SMP 871#ifdef CONFIG_SMP
@@ -871,9 +889,41 @@ static struct notifier_block gic_cpu_notifier = {
871 889
872const struct irq_domain_ops gic_irq_domain_ops = { 890const struct irq_domain_ops gic_irq_domain_ops = {
873 .map = gic_irq_domain_map, 891 .map = gic_irq_domain_map,
892 .unmap = gic_irq_domain_unmap,
874 .xlate = gic_irq_domain_xlate, 893 .xlate = gic_irq_domain_xlate,
875}; 894};
876 895
896/* Default functions for routable irq domain */
897static int gic_routable_irq_domain_map(struct irq_domain *d, unsigned int irq,
898 irq_hw_number_t hw)
899{
900 return 0;
901}
902
903static void gic_routable_irq_domain_unmap(struct irq_domain *d,
904 unsigned int irq)
905{
906}
907
908static int gic_routable_irq_domain_xlate(struct irq_domain *d,
909 struct device_node *controller,
910 const u32 *intspec, unsigned int intsize,
911 unsigned long *out_hwirq,
912 unsigned int *out_type)
913{
914 *out_hwirq += 16;
915 return 0;
916}
917
918const struct irq_domain_ops gic_default_routable_irq_domain_ops = {
919 .map = gic_routable_irq_domain_map,
920 .unmap = gic_routable_irq_domain_unmap,
921 .xlate = gic_routable_irq_domain_xlate,
922};
923
924const struct irq_domain_ops *gic_routable_irq_domain_ops =
925 &gic_default_routable_irq_domain_ops;
926
877void __init gic_init_bases(unsigned int gic_nr, int irq_start, 927void __init gic_init_bases(unsigned int gic_nr, int irq_start,
878 void __iomem *dist_base, void __iomem *cpu_base, 928 void __iomem *dist_base, void __iomem *cpu_base,
879 u32 percpu_offset, struct device_node *node) 929 u32 percpu_offset, struct device_node *node)
@@ -881,6 +931,7 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
881 irq_hw_number_t hwirq_base; 931 irq_hw_number_t hwirq_base;
882 struct gic_chip_data *gic; 932 struct gic_chip_data *gic;
883 int gic_irqs, irq_base, i; 933 int gic_irqs, irq_base, i;
934 int nr_routable_irqs;
884 935
885 BUG_ON(gic_nr >= MAX_GIC_NR); 936 BUG_ON(gic_nr >= MAX_GIC_NR);
886 937
@@ -946,14 +997,25 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
946 gic->gic_irqs = gic_irqs; 997 gic->gic_irqs = gic_irqs;
947 998
948 gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ 999 gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
949 irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id()); 1000
950 if (IS_ERR_VALUE(irq_base)) { 1001 if (of_property_read_u32(node, "arm,routable-irqs",
951 WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", 1002 &nr_routable_irqs)) {
952 irq_start); 1003 irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
953 irq_base = irq_start; 1004 numa_node_id());
1005 if (IS_ERR_VALUE(irq_base)) {
1006 WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
1007 irq_start);
1008 irq_base = irq_start;
1009 }
1010
1011 gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
1012 hwirq_base, &gic_irq_domain_ops, gic);
1013 } else {
1014 gic->domain = irq_domain_add_linear(node, nr_routable_irqs,
1015 &gic_irq_domain_ops,
1016 gic);
954 } 1017 }
955 gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, 1018
956 hwirq_base, &gic_irq_domain_ops, gic);
957 if (WARN_ON(!gic->domain)) 1019 if (WARN_ON(!gic->domain))
958 return; 1020 return;
959 1021
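
The GIC side of this change boils down to a replaceable ops pointer: it starts out aimed at defaults that reproduce the old fixed SPI mapping, and a crossbar driver can swap in its own ops via register_routable_domain_ops(). A stripped-down model of that hand-off (the struct, the allocator, and the function names below are illustrative stand-ins, not the kernel types):

#include <stdio.h>

/* Simplified: xlate maps a devicetree SPI number to a GIC hwirq. */
struct routable_ops {
	int (*xlate)(unsigned int spi, unsigned long *out_hwirq);
};

/* Default ops: SPI n sits at hwirq n + 32, the legacy fixed mapping. */
static int default_xlate(unsigned int spi, unsigned long *out_hwirq)
{
	*out_hwirq = spi + 32;
	return 0;
}

/* A crossbar-style override: route the SPI onto the next free line. */
static unsigned long next_free_line = 32;	/* hypothetical allocator */
static int crossbar_xlate(unsigned int spi, unsigned long *out_hwirq)
{
	*out_hwirq = next_free_line++;
	return 0;
}

static struct routable_ops default_ops = { .xlate = default_xlate };
static struct routable_ops *routable_ops = &default_ops;  /* boot default */

/* Model of register_routable_domain_ops(): just swap the pointer. */
static void register_routable_ops(struct routable_ops *ops)
{
	routable_ops = ops;
}

int main(void)
{
	unsigned long hwirq;
	struct routable_ops cb_ops = { .xlate = crossbar_xlate };

	routable_ops->xlate(5, &hwirq);
	printf("default:  SPI 5 -> hwirq %lu\n", hwirq);	/* 37 */

	register_routable_ops(&cb_ops);		/* crossbar takes over */
	routable_ops->xlate(5, &hwirq);
	printf("crossbar: SPI 5 -> hwirq %lu\n", hwirq);	/* 32 */
	return 0;
}
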
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index e51d40031884..8e41be62812e 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -111,7 +111,8 @@ IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
111static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc) 111static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
112{ 112{
113 struct irq_domain *d = irq_get_handler_data(irq); 113 struct irq_domain *d = irq_get_handler_data(irq);
114 struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, irq); 114
115 struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
115 u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) & 116 u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
116 gc->mask_cache; 117 gc->mask_cache;
117 118
@@ -123,6 +124,19 @@ static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
123 } 124 }
124} 125}
125 126
127/*
128 * Bridge IRQ_CAUSE is asserted regardless of IRQ_MASK register.
129 * To avoid interrupt events on stale irqs, we clear them before unmask.
130 */
131static unsigned int orion_bridge_irq_startup(struct irq_data *d)
132{
133 struct irq_chip_type *ct = irq_data_get_chip_type(d);
134
135 ct->chip.irq_ack(d);
136 ct->chip.irq_unmask(d);
137 return 0;
138}
139
126static int __init orion_bridge_irq_init(struct device_node *np, 140static int __init orion_bridge_irq_init(struct device_node *np,
127 struct device_node *parent) 141 struct device_node *parent)
128{ 142{
@@ -143,7 +157,7 @@ static int __init orion_bridge_irq_init(struct device_node *np,
143 } 157 }
144 158
145 ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name, 159 ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
146 handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE); 160 handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
147 if (ret) { 161 if (ret) {
148 pr_err("%s: unable to alloc irq domain gc\n", np->name); 162 pr_err("%s: unable to alloc irq domain gc\n", np->name);
149 return ret; 163 return ret;
@@ -176,12 +190,14 @@ static int __init orion_bridge_irq_init(struct device_node *np,
176 190
177 gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE; 191 gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
178 gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK; 192 gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
193 gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
179 gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit; 194 gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
180 gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; 195 gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
181 gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; 196 gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
182 197
183 /* mask all interrupts */ 198 /* mask and clear all interrupts */
184 writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK); 199 writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
200 writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);
185 201
186 irq_set_handler_data(irq, domain); 202 irq_set_handler_data(irq, domain);
187 irq_set_chained_handler(irq, orion_bridge_irq_handler); 203 irq_set_chained_handler(irq, orion_bridge_irq_handler);
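
The startup ordering matters because, as the comment above says, CAUSE latches events regardless of MASK: unmasking first would immediately deliver an interrupt for something that happened before the driver probed. A toy model of why irq_ack() must run before irq_unmask() (two variables stand in for the two registers; this is a sketch of the race, not the driver):

#include <stdio.h>

/* Model of the bridge: CAUSE latches events regardless of MASK. */
static unsigned int cause;	/* ORION_BRIDGE_IRQ_CAUSE */
static unsigned int mask;	/* ORION_BRIDGE_IRQ_MASK  */

static void hw_event(unsigned int bit)
{
	cause |= 1u << bit;		/* latched even while masked */
}

static void deliver_pending(void)
{
	unsigned int stat = cause & mask;	/* what the handler sees */

	if (stat)
		printf("  interrupt fires, stat=0x%x\n", stat);
	else
		printf("  quiet\n");
}

static void startup(unsigned int bit, int ack_first)
{
	if (ack_first)
		cause &= ~(1u << bit);	/* irq_ack: drop the stale latch */
	mask |= 1u << bit;		/* irq_unmask */
	deliver_pending();
}

int main(void)
{
	hw_event(3);			/* stale event from before probe */
	printf("unmask without ack:\n");
	startup(3, 0);			/* spurious interrupt */

	cause = mask = 0;
	hw_event(3);
	printf("ack, then unmask:\n");
	startup(3, 1);			/* clean start */
	return 0;
}
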
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 8e21ae0bab46..fd2c980e4cea 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -57,6 +57,7 @@
57 57
58/** 58/**
59 * struct vic_device - VIC PM device 59 * struct vic_device - VIC PM device
60 * @parent_irq: The parent IRQ number of the VIC if cascaded, or 0.
60 * @irq: The IRQ number for the base of the VIC. 61 * @irq: The IRQ number for the base of the VIC.
61 * @base: The register base for the VIC. 62 * @base: The register base for the VIC.
62 * @valid_sources: A bitmask of valid interrupts 63 * @valid_sources: A bitmask of valid interrupts
@@ -224,6 +225,17 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
224 return handled; 225 return handled;
225} 226}
226 227
228static void vic_handle_irq_cascaded(unsigned int irq, struct irq_desc *desc)
229{
230 u32 stat, hwirq;
231 struct vic_device *vic = irq_desc_get_handler_data(desc);
232
233 while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
234 hwirq = ffs(stat) - 1;
235 generic_handle_irq(irq_find_mapping(vic->domain, hwirq));
236 }
237}
238
227/* 239/*
228 * Keep iterating over all registered VIC's until there are no pending 240 * Keep iterating over all registered VIC's until there are no pending
229 * interrupts. 241 * interrupts.
@@ -246,6 +258,7 @@ static struct irq_domain_ops vic_irqdomain_ops = {
246/** 258/**
247 * vic_register() - Register a VIC. 259 * vic_register() - Register a VIC.
248 * @base: The base address of the VIC. 260 * @base: The base address of the VIC.
261 * @parent_irq: The parent IRQ if cascaded, else 0.
249 * @irq: The base IRQ for the VIC. 262 * @irq: The base IRQ for the VIC.
250 * @valid_sources: bitmask of valid interrupts 263 * @valid_sources: bitmask of valid interrupts
251 * @resume_sources: bitmask of interrupts allowed for resume sources. 264 * @resume_sources: bitmask of interrupts allowed for resume sources.
@@ -257,7 +270,8 @@ static struct irq_domain_ops vic_irqdomain_ops = {
257 * 270 *
258 * This also configures the IRQ domain for the VIC. 271 * This also configures the IRQ domain for the VIC.
259 */ 272 */
260static void __init vic_register(void __iomem *base, unsigned int irq, 273static void __init vic_register(void __iomem *base, unsigned int parent_irq,
274 unsigned int irq,
261 u32 valid_sources, u32 resume_sources, 275 u32 valid_sources, u32 resume_sources,
262 struct device_node *node) 276 struct device_node *node)
263{ 277{
@@ -273,15 +287,25 @@ static void __init vic_register(void __iomem *base, unsigned int irq,
273 v->base = base; 287 v->base = base;
274 v->valid_sources = valid_sources; 288 v->valid_sources = valid_sources;
275 v->resume_sources = resume_sources; 289 v->resume_sources = resume_sources;
276 v->irq = irq;
277 set_handle_irq(vic_handle_irq); 290 set_handle_irq(vic_handle_irq);
278 vic_id++; 291 vic_id++;
292
293 if (parent_irq) {
294 irq_set_handler_data(parent_irq, v);
295 irq_set_chained_handler(parent_irq, vic_handle_irq_cascaded);
296 }
297
279 v->domain = irq_domain_add_simple(node, fls(valid_sources), irq, 298 v->domain = irq_domain_add_simple(node, fls(valid_sources), irq,
280 &vic_irqdomain_ops, v); 299 &vic_irqdomain_ops, v);
281 /* create an IRQ mapping for each valid IRQ */ 300 /* create an IRQ mapping for each valid IRQ */
282 for (i = 0; i < fls(valid_sources); i++) 301 for (i = 0; i < fls(valid_sources); i++)
283 if (valid_sources & (1 << i)) 302 if (valid_sources & (1 << i))
284 irq_create_mapping(v->domain, i); 303 irq_create_mapping(v->domain, i);
304 /* If no base IRQ was passed, figure out our allocated base */
305 if (irq)
306 v->irq = irq;
307 else
308 v->irq = irq_find_mapping(v->domain, 0);
285} 309}
286 310
287static void vic_ack_irq(struct irq_data *d) 311static void vic_ack_irq(struct irq_data *d)
@@ -409,10 +433,10 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
409 writel(32, base + VIC_PL190_DEF_VECT_ADDR); 433 writel(32, base + VIC_PL190_DEF_VECT_ADDR);
410 } 434 }
411 435
412 vic_register(base, irq_start, vic_sources, 0, node); 436 vic_register(base, 0, irq_start, vic_sources, 0, node);
413} 437}
414 438
415void __init __vic_init(void __iomem *base, int irq_start, 439void __init __vic_init(void __iomem *base, int parent_irq, int irq_start,
416 u32 vic_sources, u32 resume_sources, 440 u32 vic_sources, u32 resume_sources,
417 struct device_node *node) 441 struct device_node *node)
418{ 442{
@@ -449,7 +473,7 @@ void __init __vic_init(void __iomem *base, int irq_start,
449 473
450 vic_init2(base); 474 vic_init2(base);
451 475
452 vic_register(base, irq_start, vic_sources, resume_sources, node); 476 vic_register(base, parent_irq, irq_start, vic_sources, resume_sources, node);
453} 477}
454 478
455/** 479/**
@@ -462,8 +486,30 @@ void __init __vic_init(void __iomem *base, int irq_start,
462void __init vic_init(void __iomem *base, unsigned int irq_start, 486void __init vic_init(void __iomem *base, unsigned int irq_start,
463 u32 vic_sources, u32 resume_sources) 487 u32 vic_sources, u32 resume_sources)
464{ 488{
465 __vic_init(base, irq_start, vic_sources, resume_sources, NULL); 489 __vic_init(base, 0, irq_start, vic_sources, resume_sources, NULL);
490}
491
492/**
493 * vic_init_cascaded() - initialise a cascaded vectored interrupt controller
494 * @base: iomem base address
495 * @parent_irq: the parent IRQ we're cascaded off
497 * @irq_start: starting interrupt number, must be a multiple of 32
497 * @vic_sources: bitmask of interrupt sources to allow
498 * @resume_sources: bitmask of interrupt sources to allow for resume
499 *
500 * This returns the base for the new interrupts or negative on error.
501 */
502int __init vic_init_cascaded(void __iomem *base, unsigned int parent_irq,
503 u32 vic_sources, u32 resume_sources)
504{
505 struct vic_device *v;
506
507 v = &vic_devices[vic_id];
508 __vic_init(base, parent_irq, 0, vic_sources, resume_sources, NULL);
509	/* Return our acquired base */
510 return v->irq;
466} 511}
512EXPORT_SYMBOL_GPL(vic_init_cascaded);
467 513
468#ifdef CONFIG_OF 514#ifdef CONFIG_OF
469int __init vic_of_init(struct device_node *node, struct device_node *parent) 515int __init vic_of_init(struct device_node *node, struct device_node *parent)
@@ -485,7 +531,7 @@ int __init vic_of_init(struct device_node *node, struct device_node *parent)
485 /* 531 /*
486 * Passing 0 as first IRQ makes the simple domain allocate descriptors 532 * Passing 0 as first IRQ makes the simple domain allocate descriptors
487 */ 533 */
488 __vic_init(regs, 0, interrupt_mask, wakeup_mask, node); 534 __vic_init(regs, 0, 0, interrupt_mask, wakeup_mask, node);
489 535
490 return 0; 536 return 0;
491} 537}
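
vic_handle_irq_cascaded() drains the status register lowest bit first: ffs(stat) - 1 gives the pending hwirq, and because VIC_IRQ_STATUS is re-read on every pass, the loop only exits once nothing is pending. The dispatch arithmetic on its own (the status sequence is invented for the demo):

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Fake VIC_IRQ_STATUS reads: each read returns the still-pending bits. */
static unsigned int status_values[] = { 0x412, 0x410, 0x400, 0 };
static int reads;

static unsigned int read_status(void)
{
	return status_values[reads++];
}

int main(void)
{
	unsigned int stat;

	/* Same shape as the cascaded handler: lowest set bit first. */
	while ((stat = read_status())) {
		int hwirq = ffs(stat) - 1;

		printf("dispatch hwirq %d (stat was 0x%08x)\n", hwirq, stat);
	}
	return 0;
}

Starting from 0x412 (bits 1, 4, 10 pending), this dispatches hwirqs 1, 4, 10 in order, just as the chained handler would walk them through irq_find_mapping() and generic_handle_irq().
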
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index a4989ec6292e..8eb6a36f125a 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -746,28 +746,6 @@ static int nand_davinci_probe(struct platform_device *pdev)
746 goto err_clk_enable; 746 goto err_clk_enable;
747 } 747 }
748 748
749 /*
750 * Setup Async configuration register in case we did not boot from
751 * NAND and so bootloader did not bother to set it up.
752 */
753 val = davinci_nand_readl(info, A1CR_OFFSET + info->core_chipsel * 4);
754
755 /* Extended Wait is not valid and Select Strobe mode is not used */
756 val &= ~(ACR_ASIZE_MASK | ACR_EW_MASK | ACR_SS_MASK);
757 if (info->chip.options & NAND_BUSWIDTH_16)
758 val |= 0x1;
759
760 davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val);
761
762 ret = 0;
763 if (info->timing)
764 ret = davinci_aemif_setup_timing(info->timing, info->base,
765 info->core_chipsel);
766 if (ret < 0) {
767 dev_dbg(&pdev->dev, "NAND timing values setup fail\n");
768 goto err;
769 }
770
771 spin_lock_irq(&davinci_nand_lock); 749 spin_lock_irq(&davinci_nand_lock);
772 750
773 /* put CSxNAND into NAND mode */ 751 /* put CSxNAND into NAND mode */
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 6d452a78b19c..fa0e4e057b99 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -22,7 +22,7 @@ config POWER_RESET_GPIO
22 22
23config POWER_RESET_MSM 23config POWER_RESET_MSM
24 bool "Qualcomm MSM power-off driver" 24 bool "Qualcomm MSM power-off driver"
25 depends on POWER_RESET && ARCH_MSM 25 depends on POWER_RESET && ARCH_QCOM
26 help 26 help
27 Power off and restart support for Qualcomm boards. 27 Power off and restart support for Qualcomm boards.
28 28
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
index 1ebe67cd1833..7442bc130055 100644
--- a/drivers/sh/clk/cpg.c
+++ b/drivers/sh/clk/cpg.c
@@ -36,9 +36,47 @@ static void sh_clk_write(int value, struct clk *clk)
36 iowrite32(value, clk->mapped_reg); 36 iowrite32(value, clk->mapped_reg);
37} 37}
38 38
39static unsigned int r8(const void __iomem *addr)
40{
41 return ioread8(addr);
42}
43
44static unsigned int r16(const void __iomem *addr)
45{
46 return ioread16(addr);
47}
48
49static unsigned int r32(const void __iomem *addr)
50{
51 return ioread32(addr);
52}
53
39static int sh_clk_mstp_enable(struct clk *clk) 54static int sh_clk_mstp_enable(struct clk *clk)
40{ 55{
41 sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk); 56 sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
57 if (clk->status_reg) {
58 unsigned int (*read)(const void __iomem *addr);
59 int i;
60 void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
61 (phys_addr_t)clk->enable_reg + clk->mapped_reg;
62
63 if (clk->flags & CLK_ENABLE_REG_8BIT)
64 read = r8;
65 else if (clk->flags & CLK_ENABLE_REG_16BIT)
66 read = r16;
67 else
68 read = r32;
69
70 for (i = 1000;
71 (read(mapped_status) & (1 << clk->enable_bit)) && i;
72 i--)
73 cpu_relax();
74 if (!i) {
75 pr_err("cpg: failed to enable %p[%d]\n",
76 clk->enable_reg, clk->enable_bit);
77 return -ETIMEDOUT;
78 }
79 }
42 return 0; 80 return 0;
43} 81}
44 82
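
The MSTP change stops trusting the enable write alone: when a status register exists, the bit is polled until the module actually reports running, with a bounded spin so a dead clock yields -ETIMEDOUT instead of a hang. The same pattern extracted into a runnable sketch (read_status() stands in for the width-selected ioread8/16/32; the three busy reads are made up):

#include <stdio.h>
#include <errno.h>

#define ENABLE_BIT 5

/* Stand-in for the width-selected ioread: busy for a few reads. */
static int busy_reads = 3;
static unsigned int read_status(void)
{
	if (busy_reads > 0) {
		busy_reads--;
		return 1u << ENABLE_BIT;	/* still stopped */
	}
	return 0;				/* module now running */
}

/* Same bounded poll as sh_clk_mstp_enable(): up to 1000 spins. */
static int wait_for_enable(void)
{
	int i;

	for (i = 1000; (read_status() & (1u << ENABLE_BIT)) && i; i--)
		;	/* cpu_relax() in the kernel */
	if (!i)
		return -ETIMEDOUT;
	return 0;
}

int main(void)
{
	printf("wait_for_enable() = %d\n", wait_for_enable());	/* 0 */
	return 0;
}
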
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index a3815eaed421..ce9b12d38786 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1024,7 +1024,7 @@ config SERIAL_SGI_IOC3
1024 1024
1025config SERIAL_MSM 1025config SERIAL_MSM
1026 bool "MSM on-chip serial port support" 1026 bool "MSM on-chip serial port support"
1027 depends on ARCH_MSM 1027 depends on ARCH_MSM || ARCH_QCOM
1028 select SERIAL_CORE 1028 select SERIAL_CORE
1029 1029
1030config SERIAL_MSM_CONSOLE 1030config SERIAL_MSM_CONSOLE
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 79d25894343a..f1ff408c4b17 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -292,7 +292,7 @@ config DAVINCI_WATCHDOG
292 292
293config ORION_WATCHDOG 293config ORION_WATCHDOG
294 tristate "Orion watchdog" 294 tristate "Orion watchdog"
295 depends on ARCH_ORION5X || ARCH_KIRKWOOD || ARCH_DOVE 295 depends on ARCH_ORION5X || ARCH_KIRKWOOD || ARCH_DOVE || ARCH_MVEBU
296 select WATCHDOG_CORE 296 select WATCHDOG_CORE
297 help 297 help
298 Say Y here if to include support for the watchdog timer 298 Say Y here if to include support for the watchdog timer
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index f7722a424676..6f9b4c6e9bca 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -19,101 +19,204 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/watchdog.h> 20#include <linux/watchdog.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/interrupt.h>
22#include <linux/io.h> 23#include <linux/io.h>
23#include <linux/spinlock.h>
24#include <linux/clk.h> 24#include <linux/clk.h>
25#include <linux/err.h> 25#include <linux/err.h>
26#include <linux/of.h> 26#include <linux/of.h>
27#include <mach/bridge-regs.h> 27#include <linux/of_device.h>
28
29/* RSTOUT mask register physical address for Orion5x, Kirkwood and Dove */
30#define ORION_RSTOUT_MASK_OFFSET 0x20108
31
32/* Internal registers can be configured at any 1 MiB aligned address */
33#define INTERNAL_REGS_MASK ~(SZ_1M - 1)
28 34
29/* 35/*
30 * Watchdog timer block registers. 36 * Watchdog timer block registers.
31 */ 37 */
32#define TIMER_CTRL 0x0000 38#define TIMER_CTRL 0x0000
33#define WDT_EN 0x0010 39#define TIMER_A370_STATUS 0x04
34#define WDT_VAL 0x0024
35 40
36#define WDT_MAX_CYCLE_COUNT 0xffffffff 41#define WDT_MAX_CYCLE_COUNT 0xffffffff
37#define WDT_IN_USE 0
38#define WDT_OK_TO_CLOSE 1
39 42
40#define WDT_RESET_OUT_EN BIT(1) 43#define WDT_A370_RATIO_MASK(v) ((v) << 16)
41#define WDT_INT_REQ BIT(3) 44#define WDT_A370_RATIO_SHIFT 5
45#define WDT_A370_RATIO (1 << WDT_A370_RATIO_SHIFT)
46
47#define WDT_AXP_FIXED_ENABLE_BIT BIT(10)
48#define WDT_A370_EXPIRED BIT(31)
42 49
43static bool nowayout = WATCHDOG_NOWAYOUT; 50static bool nowayout = WATCHDOG_NOWAYOUT;
44static int heartbeat = -1; /* module parameter (seconds) */ 51static int heartbeat = -1; /* module parameter (seconds) */
45static unsigned int wdt_max_duration; /* (seconds) */
46static struct clk *clk;
47static unsigned int wdt_tclk;
48static void __iomem *wdt_reg;
49static DEFINE_SPINLOCK(wdt_lock);
50 52
51static int orion_wdt_ping(struct watchdog_device *wdt_dev) 53struct orion_watchdog;
54
55struct orion_watchdog_data {
56 int wdt_counter_offset;
57 int wdt_enable_bit;
58 int rstout_enable_bit;
59 int (*clock_init)(struct platform_device *,
60 struct orion_watchdog *);
61 int (*start)(struct watchdog_device *);
62};
63
64struct orion_watchdog {
65 struct watchdog_device wdt;
66 void __iomem *reg;
67 void __iomem *rstout;
68 unsigned long clk_rate;
69 struct clk *clk;
70 const struct orion_watchdog_data *data;
71};
72
73static int orion_wdt_clock_init(struct platform_device *pdev,
74 struct orion_watchdog *dev)
52{ 75{
53 spin_lock(&wdt_lock); 76 int ret;
54 77
55 /* Reload watchdog duration */ 78 dev->clk = clk_get(&pdev->dev, NULL);
56 writel(wdt_tclk * wdt_dev->timeout, wdt_reg + WDT_VAL); 79 if (IS_ERR(dev->clk))
80 return PTR_ERR(dev->clk);
81 ret = clk_prepare_enable(dev->clk);
82 if (ret) {
83 clk_put(dev->clk);
84 return ret;
85 }
57 86
58 spin_unlock(&wdt_lock); 87 dev->clk_rate = clk_get_rate(dev->clk);
59 return 0; 88 return 0;
60} 89}
61 90
62static int orion_wdt_start(struct watchdog_device *wdt_dev) 91static int armada370_wdt_clock_init(struct platform_device *pdev,
92 struct orion_watchdog *dev)
63{ 93{
64 u32 reg; 94 int ret;
65 95
66 spin_lock(&wdt_lock); 96 dev->clk = clk_get(&pdev->dev, NULL);
97 if (IS_ERR(dev->clk))
98 return PTR_ERR(dev->clk);
99 ret = clk_prepare_enable(dev->clk);
100 if (ret) {
101 clk_put(dev->clk);
102 return ret;
103 }
104
105 /* Setup watchdog input clock */
106 atomic_io_modify(dev->reg + TIMER_CTRL,
107 WDT_A370_RATIO_MASK(WDT_A370_RATIO_SHIFT),
108 WDT_A370_RATIO_MASK(WDT_A370_RATIO_SHIFT));
109
110 dev->clk_rate = clk_get_rate(dev->clk) / WDT_A370_RATIO;
111 return 0;
112}
113
114static int armadaxp_wdt_clock_init(struct platform_device *pdev,
115 struct orion_watchdog *dev)
116{
117 int ret;
118
119 dev->clk = of_clk_get_by_name(pdev->dev.of_node, "fixed");
120 if (IS_ERR(dev->clk))
121 return PTR_ERR(dev->clk);
122 ret = clk_prepare_enable(dev->clk);
123 if (ret) {
124 clk_put(dev->clk);
125 return ret;
126 }
127
128 /* Enable the fixed watchdog clock input */
129 atomic_io_modify(dev->reg + TIMER_CTRL,
130 WDT_AXP_FIXED_ENABLE_BIT,
131 WDT_AXP_FIXED_ENABLE_BIT);
132
133 dev->clk_rate = clk_get_rate(dev->clk);
134 return 0;
135}
136
137static int orion_wdt_ping(struct watchdog_device *wdt_dev)
138{
139 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
140 /* Reload watchdog duration */
141 writel(dev->clk_rate * wdt_dev->timeout,
142 dev->reg + dev->data->wdt_counter_offset);
143 return 0;
144}
145
146static int armada370_start(struct watchdog_device *wdt_dev)
147{
148 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
67 149
68 /* Set watchdog duration */ 150 /* Set watchdog duration */
69 writel(wdt_tclk * wdt_dev->timeout, wdt_reg + WDT_VAL); 151 writel(dev->clk_rate * wdt_dev->timeout,
152 dev->reg + dev->data->wdt_counter_offset);
70 153
71 /* Clear watchdog timer interrupt */ 154 /* Clear the watchdog expiration bit */
72 writel(~WDT_INT_REQ, BRIDGE_CAUSE); 155 atomic_io_modify(dev->reg + TIMER_A370_STATUS, WDT_A370_EXPIRED, 0);
73 156
74 /* Enable watchdog timer */ 157 /* Enable watchdog timer */
75 reg = readl(wdt_reg + TIMER_CTRL); 158 atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit,
76 reg |= WDT_EN; 159 dev->data->wdt_enable_bit);
77 writel(reg, wdt_reg + TIMER_CTRL); 160
161 atomic_io_modify(dev->rstout, dev->data->rstout_enable_bit,
162 dev->data->rstout_enable_bit);
163 return 0;
164}
165
166static int orion_start(struct watchdog_device *wdt_dev)
167{
168 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
169
170 /* Set watchdog duration */
171 writel(dev->clk_rate * wdt_dev->timeout,
172 dev->reg + dev->data->wdt_counter_offset);
173
174 /* Enable watchdog timer */
175 atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit,
176 dev->data->wdt_enable_bit);
78 177
79 /* Enable reset on watchdog */ 178 /* Enable reset on watchdog */
80 reg = readl(RSTOUTn_MASK); 179 atomic_io_modify(dev->rstout, dev->data->rstout_enable_bit,
81 reg |= WDT_RESET_OUT_EN; 180 dev->data->rstout_enable_bit);
82 writel(reg, RSTOUTn_MASK);
83 181
84 spin_unlock(&wdt_lock);
85 return 0; 182 return 0;
86} 183}
87 184
88static int orion_wdt_stop(struct watchdog_device *wdt_dev) 185static int orion_wdt_start(struct watchdog_device *wdt_dev)
89{ 186{
90 u32 reg; 187 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
91 188
92 spin_lock(&wdt_lock); 189 /* There are some per-SoC quirks to handle */
190 return dev->data->start(wdt_dev);
191}
192
193static int orion_wdt_stop(struct watchdog_device *wdt_dev)
194{
195 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
93 196
94 /* Disable reset on watchdog */ 197 /* Disable reset on watchdog */
95 reg = readl(RSTOUTn_MASK); 198 atomic_io_modify(dev->rstout, dev->data->rstout_enable_bit, 0);
96 reg &= ~WDT_RESET_OUT_EN;
97 writel(reg, RSTOUTn_MASK);
98 199
99 /* Disable watchdog timer */ 200 /* Disable watchdog timer */
100 reg = readl(wdt_reg + TIMER_CTRL); 201 atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit, 0);
101 reg &= ~WDT_EN;
102 writel(reg, wdt_reg + TIMER_CTRL);
103 202
104 spin_unlock(&wdt_lock);
105 return 0; 203 return 0;
106} 204}
107 205
108static unsigned int orion_wdt_get_timeleft(struct watchdog_device *wdt_dev) 206static int orion_wdt_enabled(struct orion_watchdog *dev)
109{ 207{
110 unsigned int time_left; 208 bool enabled, running;
209
210 enabled = readl(dev->rstout) & dev->data->rstout_enable_bit;
211 running = readl(dev->reg + TIMER_CTRL) & dev->data->wdt_enable_bit;
111 212
112 spin_lock(&wdt_lock); 213 return enabled && running;
113 time_left = readl(wdt_reg + WDT_VAL) / wdt_tclk; 214}
114 spin_unlock(&wdt_lock);
115 215
116 return time_left; 216static unsigned int orion_wdt_get_timeleft(struct watchdog_device *wdt_dev)
217{
218 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
219 return readl(dev->reg + dev->data->wdt_counter_offset) / dev->clk_rate;
117} 220}
118 221
119static int orion_wdt_set_timeout(struct watchdog_device *wdt_dev, 222static int orion_wdt_set_timeout(struct watchdog_device *wdt_dev,
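
Every timeout computation in this driver is one conversion: the counter ticks at clk_rate per second, so ping/start multiply seconds up to ticks, get_timeleft divides back down, and the largest representable timeout is WDT_MAX_CYCLE_COUNT / clk_rate. Worked through with a hypothetical 25 MHz TCLK:

#include <stdio.h>

#define WDT_MAX_CYCLE_COUNT 0xffffffffu

int main(void)
{
	unsigned long clk_rate = 25000000;	/* hypothetical 25 MHz TCLK */
	unsigned int timeout = 30;		/* seconds */

	/* orion_wdt_ping()/start(): seconds -> counter ticks */
	unsigned int counter = clk_rate * timeout;	/* 750,000,000 */

	/* orion_wdt_get_timeleft(): counter ticks -> seconds */
	unsigned int left = counter / clk_rate;		/* back to 30 */

	/* probe(): largest timeout the 32-bit counter can hold */
	unsigned int max = WDT_MAX_CYCLE_COUNT / clk_rate;	/* ~171 s */

	printf("counter=%u left=%us max=%us\n", counter, left, max);
	return 0;
}
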
@@ -137,68 +240,188 @@ static const struct watchdog_ops orion_wdt_ops = {
137 .get_timeleft = orion_wdt_get_timeleft, 240 .get_timeleft = orion_wdt_get_timeleft,
138}; 241};
139 242
140static struct watchdog_device orion_wdt = { 243static irqreturn_t orion_wdt_irq(int irq, void *devid)
141 .info = &orion_wdt_info, 244{
142 .ops = &orion_wdt_ops, 245 panic("Watchdog Timeout");
143 .min_timeout = 1, 246 return IRQ_HANDLED;
247}
248
249/*
250 * The original devicetree binding for this driver specified only
251 * one memory resource, so in order to keep DT backwards compatibility
252 * we try to fall back to a hardcoded register address if the resource
253 * is missing from the devicetree.
254 */
255static void __iomem *orion_wdt_ioremap_rstout(struct platform_device *pdev,
256 phys_addr_t internal_regs)
257{
258 struct resource *res;
259 phys_addr_t rstout;
260
261 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
262 if (res)
263 return devm_ioremap(&pdev->dev, res->start,
264 resource_size(res));
265
266 /* This workaround works only for "orion-wdt", DT-enabled */
267 if (!of_device_is_compatible(pdev->dev.of_node, "marvell,orion-wdt"))
268 return NULL;
269
270 rstout = internal_regs + ORION_RSTOUT_MASK_OFFSET;
271
272	WARN(1, FW_BUG "falling back to hardcoded RSTOUT reg %pa\n", &rstout);
273 return devm_ioremap(&pdev->dev, rstout, 0x4);
274}
275
276static const struct orion_watchdog_data orion_data = {
277 .rstout_enable_bit = BIT(1),
278 .wdt_enable_bit = BIT(4),
279 .wdt_counter_offset = 0x24,
280 .clock_init = orion_wdt_clock_init,
281 .start = orion_start,
282};
283
284static const struct orion_watchdog_data armada370_data = {
285 .rstout_enable_bit = BIT(8),
286 .wdt_enable_bit = BIT(8),
287 .wdt_counter_offset = 0x34,
288 .clock_init = armada370_wdt_clock_init,
289 .start = armada370_start,
144}; 290};
145 291
292static const struct orion_watchdog_data armadaxp_data = {
293 .rstout_enable_bit = BIT(8),
294 .wdt_enable_bit = BIT(8),
295 .wdt_counter_offset = 0x34,
296 .clock_init = armadaxp_wdt_clock_init,
297 .start = armada370_start,
298};
299
300static const struct of_device_id orion_wdt_of_match_table[] = {
301 {
302 .compatible = "marvell,orion-wdt",
303 .data = &orion_data,
304 },
305 {
306 .compatible = "marvell,armada-370-wdt",
307 .data = &armada370_data,
308 },
309 {
310 .compatible = "marvell,armada-xp-wdt",
311 .data = &armadaxp_data,
312 },
313 {},
314};
315MODULE_DEVICE_TABLE(of, orion_wdt_of_match_table);
316
146static int orion_wdt_probe(struct platform_device *pdev) 317static int orion_wdt_probe(struct platform_device *pdev)
147{ 318{
319 struct orion_watchdog *dev;
320 const struct of_device_id *match;
321 unsigned int wdt_max_duration; /* (seconds) */
148 struct resource *res; 322 struct resource *res;
149 int ret; 323 int ret, irq;
150 324
151 clk = devm_clk_get(&pdev->dev, NULL); 325 dev = devm_kzalloc(&pdev->dev, sizeof(struct orion_watchdog),
152 if (IS_ERR(clk)) { 326 GFP_KERNEL);
153 dev_err(&pdev->dev, "Orion Watchdog missing clock\n"); 327 if (!dev)
154 return -ENODEV; 328 return -ENOMEM;
155 } 329
156 clk_prepare_enable(clk); 330 match = of_match_device(orion_wdt_of_match_table, &pdev->dev);
157 wdt_tclk = clk_get_rate(clk); 331 if (!match)
332 /* Default legacy match */
333 match = &orion_wdt_of_match_table[0];
334
335 dev->wdt.info = &orion_wdt_info;
336 dev->wdt.ops = &orion_wdt_ops;
337 dev->wdt.min_timeout = 1;
338 dev->data = match->data;
158 339
159 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 340 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
160 if (!res) 341 if (!res)
161 return -ENODEV; 342 return -ENODEV;
162 wdt_reg = devm_ioremap(&pdev->dev, res->start, resource_size(res));
163 if (!wdt_reg)
164 return -ENOMEM;
165 343
166 wdt_max_duration = WDT_MAX_CYCLE_COUNT / wdt_tclk; 344 dev->reg = devm_ioremap(&pdev->dev, res->start,
345 resource_size(res));
346 if (!dev->reg)
347 return -ENOMEM;
167 348
168 orion_wdt.timeout = wdt_max_duration; 349 dev->rstout = orion_wdt_ioremap_rstout(pdev, res->start &
169 orion_wdt.max_timeout = wdt_max_duration; 350 INTERNAL_REGS_MASK);
170 watchdog_init_timeout(&orion_wdt, heartbeat, &pdev->dev); 351 if (!dev->rstout)
352 return -ENODEV;
171 353
172 watchdog_set_nowayout(&orion_wdt, nowayout); 354 ret = dev->data->clock_init(pdev, dev);
173 ret = watchdog_register_device(&orion_wdt);
174 if (ret) { 355 if (ret) {
175 clk_disable_unprepare(clk); 356 dev_err(&pdev->dev, "cannot initialize clock\n");
176 return ret; 357 return ret;
177 } 358 }
178 359
360 wdt_max_duration = WDT_MAX_CYCLE_COUNT / dev->clk_rate;
361
362 dev->wdt.timeout = wdt_max_duration;
363 dev->wdt.max_timeout = wdt_max_duration;
364 watchdog_init_timeout(&dev->wdt, heartbeat, &pdev->dev);
365
366 platform_set_drvdata(pdev, &dev->wdt);
367 watchdog_set_drvdata(&dev->wdt, dev);
368
369 /*
370 * Let's make sure the watchdog is fully stopped, unless it's
371 * explicitly enabled. This may be the case if the module was
372	 * removed and re-inserted, or if the bootloader explicitly
373 * set a running watchdog before booting the kernel.
374 */
375 if (!orion_wdt_enabled(dev))
376 orion_wdt_stop(&dev->wdt);
377
378 /* Request the IRQ only after the watchdog is disabled */
379 irq = platform_get_irq(pdev, 0);
380 if (irq > 0) {
381 /*
382 * Not all supported platforms specify an interrupt for the
383 * watchdog, so let's make it optional.
384 */
385 ret = devm_request_irq(&pdev->dev, irq, orion_wdt_irq, 0,
386 pdev->name, dev);
387 if (ret < 0) {
388 dev_err(&pdev->dev, "failed to request IRQ\n");
389 goto disable_clk;
390 }
391 }
392
393 watchdog_set_nowayout(&dev->wdt, nowayout);
394 ret = watchdog_register_device(&dev->wdt);
395 if (ret)
396 goto disable_clk;
397
179 pr_info("Initial timeout %d sec%s\n", 398 pr_info("Initial timeout %d sec%s\n",
180 orion_wdt.timeout, nowayout ? ", nowayout" : ""); 399 dev->wdt.timeout, nowayout ? ", nowayout" : "");
181 return 0; 400 return 0;
401
402disable_clk:
403 clk_disable_unprepare(dev->clk);
404 clk_put(dev->clk);
405 return ret;
182} 406}
183 407
184static int orion_wdt_remove(struct platform_device *pdev) 408static int orion_wdt_remove(struct platform_device *pdev)
185{ 409{
186 watchdog_unregister_device(&orion_wdt); 410 struct watchdog_device *wdt_dev = platform_get_drvdata(pdev);
187 clk_disable_unprepare(clk); 411 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
412
413 watchdog_unregister_device(wdt_dev);
414 clk_disable_unprepare(dev->clk);
415 clk_put(dev->clk);
188 return 0; 416 return 0;
189} 417}
190 418
191static void orion_wdt_shutdown(struct platform_device *pdev) 419static void orion_wdt_shutdown(struct platform_device *pdev)
192{ 420{
193 orion_wdt_stop(&orion_wdt); 421 struct watchdog_device *wdt_dev = platform_get_drvdata(pdev);
422 orion_wdt_stop(wdt_dev);
194} 423}
195 424
196static const struct of_device_id orion_wdt_of_match_table[] = {
197 { .compatible = "marvell,orion-wdt", },
198 {},
199};
200MODULE_DEVICE_TABLE(of, orion_wdt_of_match_table);
201
202static struct platform_driver orion_wdt_driver = { 425static struct platform_driver orion_wdt_driver = {
203 .probe = orion_wdt_probe, 426 .probe = orion_wdt_probe,
204 .remove = orion_wdt_remove, 427 .remove = orion_wdt_remove,