author     Linus Torvalds <torvalds@linux-foundation.org>  2014-04-05 21:39:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-04-05 21:39:18 -0400
commit     19bc2eec3cbf9a282b592749a93ec9027d352bf2 (patch)
tree       bc7cff4dfebf2b256e62280bb006a494e967d4b2 /drivers/clk
parent     9712d3c377a9868355ea9a611aca3c54f88dc576 (diff)
parent     e44df332f30bf3040c60c1ed6674d1431fdb48b9 (diff)
Merge tag 'clk-for-linus-3.15' of git://git.linaro.org/people/mike.turquette/linux
Pull clock framework changes from Mike Turquette:
 "The clock framework changes for 3.15 look similar to past pull
  requests.  Mostly clock driver updates, more Device Tree support in
  the form of common functions useful across platforms and a handful
  of features and fixes to the framework core"

* tag 'clk-for-linus-3.15' of git://git.linaro.org/people/mike.turquette/linux: (86 commits)
  clk: shmobile: fix setting paretn clock rate
  clk: shmobile: rcar-gen2: fix lb/sd0/sd1/sdh clock parent to pll1
  clk: Fix minor errors in of_clk_init() function comments
  clk: reverse default clk provider initialization order in of_clk_init()
  clk: sirf: update copyright years to 2014
  clk: mmp: try to use closer one when do round rate
  clk: mmp: fix the wrong calculation formula
  clk: mmp: fix wrong mask when calculate denominator
  clk: st: Adds quadfs clock binding
  clk: st: Adds clockgen-vcc and clockgen-mux clock binding
  clk: st: Adds clockgen clock binding
  clk: st: Adds divmux and prediv clock binding
  clk: st: Support for A9 MUX clocks
  clk: st: Support for ClockGenA9/DDR/GPU
  clk: st: Support for QUADFS inside ClockGenB/C/D/E/F
  clk: st: Support for VCC-mux and MUX clocks
  clk: st: Support for PLLs inside ClockGenA(s)
  clk: st: Support for DIVMUX and PreDiv Clocks
  clk: support hardware-specific debugfs entries
  clk: s2mps11: Use of_get_child_by_name
  ...
Diffstat (limited to 'drivers/clk')
-rw-r--r--  drivers/clk/Kconfig | 6
-rw-r--r--  drivers/clk/Makefile | 3
-rw-r--r--  drivers/clk/at91/clk-programmable.c | 202
-rw-r--r--  drivers/clk/at91/clk-system.c | 76
-rw-r--r--  drivers/clk/clk-axi-clkgen.c | 312
-rw-r--r--  drivers/clk/clk-divider.c | 10
-rw-r--r--  drivers/clk/clk-moxart.c | 97
-rw-r--r--  drivers/clk/clk-ppc-corenet.c | 70
-rw-r--r--  drivers/clk/clk-s2mps11.c | 29
-rw-r--r--  drivers/clk/clk.c | 131
-rw-r--r--  drivers/clk/clkdev.c | 2
-rw-r--r--  drivers/clk/hisilicon/Makefile | 5
-rw-r--r--  drivers/clk/hisilicon/clk-hi3620.c | 298
-rw-r--r--  drivers/clk/hisilicon/clk-hip04.c | 58
-rw-r--r--  drivers/clk/hisilicon/clk.c | 62
-rw-r--r--  drivers/clk/hisilicon/clk.h | 17
-rw-r--r--  drivers/clk/mmp/clk-frac.c | 20
-rw-r--r--  drivers/clk/mvebu/Kconfig | 8
-rw-r--r--  drivers/clk/mvebu/Makefile | 2
-rw-r--r--  drivers/clk/mvebu/armada-375.c | 184
-rw-r--r--  drivers/clk/mvebu/armada-38x.c | 167
-rw-r--r--  drivers/clk/mvebu/clk-corediv.c | 154
-rw-r--r--  drivers/clk/shmobile/Makefile | 1
-rw-r--r--  drivers/clk/shmobile/clk-div6.c | 2
-rw-r--r--  drivers/clk/shmobile/clk-mstp.c | 2
-rw-r--r--  drivers/clk/shmobile/clk-rcar-gen2.c | 8
-rw-r--r--  drivers/clk/shmobile/clk-rz.c | 103
-rw-r--r--  drivers/clk/sirf/clk-atlas6.c | 3
-rw-r--r--  drivers/clk/sirf/clk-common.c | 3
-rw-r--r--  drivers/clk/sirf/clk-prima2.c | 3
-rw-r--r--  drivers/clk/socfpga/Makefile | 3
-rw-r--r--  drivers/clk/socfpga/clk-gate.c | 263
-rw-r--r--  drivers/clk/socfpga/clk-periph.c | 94
-rw-r--r--  drivers/clk/socfpga/clk-pll.c | 131
-rw-r--r--  drivers/clk/socfpga/clk.c | 326
-rw-r--r--  drivers/clk/socfpga/clk.h | 57
-rw-r--r--  drivers/clk/st/Makefile | 1
-rw-r--r--  drivers/clk/st/clkgen-fsyn.c | 1039
-rw-r--r--  drivers/clk/st/clkgen-mux.c | 820
-rw-r--r--  drivers/clk/st/clkgen-pll.c | 698
-rw-r--r--  drivers/clk/st/clkgen.h | 48
-rw-r--r--  drivers/clk/sunxi/clk-sunxi.c | 305
-rw-r--r--  drivers/clk/tegra/clk-periph.c | 2
-rw-r--r--  drivers/clk/ti/clk-33xx.c | 1
-rw-r--r--  drivers/clk/ti/divider.c | 8
-rw-r--r--  drivers/clk/ux500/u8500_of_clk.c | 3
-rw-r--r--  drivers/clk/zynq/clkc.c | 4
-rw-r--r--  drivers/clk/zynq/pll.c | 18
48 files changed, 5161 insertions(+), 698 deletions(-)
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index f9f605695e40..6f56d3a4f010 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -65,10 +65,12 @@ config COMMON_CLK_SI570
65 clock generators. 65 clock generators.
66 66
67config COMMON_CLK_S2MPS11 67config COMMON_CLK_S2MPS11
68 tristate "Clock driver for S2MPS11 MFD" 68 tristate "Clock driver for S2MPS11/S5M8767 MFD"
69 depends on MFD_SEC_CORE 69 depends on MFD_SEC_CORE
70 ---help--- 70 ---help---
71 This driver supports S2MPS11 crystal oscillator clock. 71 This driver supports S2MPS11/S5M8767 crystal oscillator clock. These
72 multi-function devices have 3 fixed-rate oscillators, clocked at
73 32KHz each.
72 74
73config CLK_TWL6040 75config CLK_TWL6040
74 tristate "External McPDM functional clock from twl6040" 76 tristate "External McPDM functional clock from twl6040"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 88af4a399d6c..5f8a28735c96 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
17obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o 17obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
18obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o 18obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o
19obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o 19obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
20obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
20obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o 21obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
21obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o 22obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
22obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o 23obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o
31obj-$(CONFIG_COMMON_CLK_AT91) += at91/ 32obj-$(CONFIG_COMMON_CLK_AT91) += at91/
32obj-$(CONFIG_ARCH_BCM_MOBILE) += bcm/ 33obj-$(CONFIG_ARCH_BCM_MOBILE) += bcm/
33obj-$(CONFIG_ARCH_HI3xxx) += hisilicon/ 34obj-$(CONFIG_ARCH_HI3xxx) += hisilicon/
35obj-$(CONFIG_ARCH_HIP04) += hisilicon/
34obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/ 36obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/
35ifeq ($(CONFIG_COMMON_CLK), y) 37ifeq ($(CONFIG_COMMON_CLK), y)
36obj-$(CONFIG_ARCH_MMP) += mmp/ 38obj-$(CONFIG_ARCH_MMP) += mmp/
@@ -44,6 +46,7 @@ obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += shmobile/
44obj-$(CONFIG_ARCH_SIRF) += sirf/ 46obj-$(CONFIG_ARCH_SIRF) += sirf/
45obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/ 47obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
46obj-$(CONFIG_PLAT_SPEAR) += spear/ 48obj-$(CONFIG_PLAT_SPEAR) += spear/
49obj-$(CONFIG_ARCH_STI) += st/
47obj-$(CONFIG_ARCH_SUNXI) += sunxi/ 50obj-$(CONFIG_ARCH_SUNXI) += sunxi/
48obj-$(CONFIG_ARCH_TEGRA) += tegra/ 51obj-$(CONFIG_ARCH_TEGRA) += tegra/
49obj-$(CONFIG_ARCH_OMAP2PLUS) += ti/ 52obj-$(CONFIG_ARCH_OMAP2PLUS) += ti/
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index fd792b203eaf..62e2509f9df1 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -13,12 +13,9 @@
13#include <linux/clk/at91_pmc.h> 13#include <linux/clk/at91_pmc.h>
14#include <linux/of.h> 14#include <linux/of.h>
15#include <linux/of_address.h> 15#include <linux/of_address.h>
16#include <linux/of_irq.h>
17#include <linux/io.h> 16#include <linux/io.h>
18#include <linux/wait.h> 17#include <linux/wait.h>
19#include <linux/sched.h> 18#include <linux/sched.h>
20#include <linux/interrupt.h>
21#include <linux/irq.h>
22 19
23#include "pmc.h" 20#include "pmc.h"
24 21
@@ -38,104 +35,59 @@ struct clk_programmable_layout {
38struct clk_programmable { 35struct clk_programmable {
39 struct clk_hw hw; 36 struct clk_hw hw;
40 struct at91_pmc *pmc; 37 struct at91_pmc *pmc;
41 unsigned int irq;
42 wait_queue_head_t wait;
43 u8 id; 38 u8 id;
44 u8 css;
45 u8 pres;
46 u8 slckmck;
47 const struct clk_programmable_layout *layout; 39 const struct clk_programmable_layout *layout;
48}; 40};
49 41
50#define to_clk_programmable(hw) container_of(hw, struct clk_programmable, hw) 42#define to_clk_programmable(hw) container_of(hw, struct clk_programmable, hw)
51 43
52 44static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
53static irqreturn_t clk_programmable_irq_handler(int irq, void *dev_id) 45 unsigned long parent_rate)
54{
55 struct clk_programmable *prog = (struct clk_programmable *)dev_id;
56
57 wake_up(&prog->wait);
58
59 return IRQ_HANDLED;
60}
61
62static int clk_programmable_prepare(struct clk_hw *hw)
63{ 46{
64 u32 tmp; 47 u32 pres;
65 struct clk_programmable *prog = to_clk_programmable(hw); 48 struct clk_programmable *prog = to_clk_programmable(hw);
66 struct at91_pmc *pmc = prog->pmc; 49 struct at91_pmc *pmc = prog->pmc;
67 const struct clk_programmable_layout *layout = prog->layout; 50 const struct clk_programmable_layout *layout = prog->layout;
68 u8 id = prog->id;
69 u32 mask = PROG_STATUS_MASK(id);
70
71 tmp = prog->css | (prog->pres << layout->pres_shift);
72 if (layout->have_slck_mck && prog->slckmck)
73 tmp |= AT91_PMC_CSSMCK_MCK;
74
75 pmc_write(pmc, AT91_PMC_PCKR(id), tmp);
76
77 while (!(pmc_read(pmc, AT91_PMC_SR) & mask))
78 wait_event(prog->wait, pmc_read(pmc, AT91_PMC_SR) & mask);
79 51
80 return 0; 52 pres = (pmc_read(pmc, AT91_PMC_PCKR(prog->id)) >> layout->pres_shift) &
53 PROG_PRES_MASK;
54 return parent_rate >> pres;
81} 55}
82 56
83static int clk_programmable_is_ready(struct clk_hw *hw) 57static long clk_programmable_determine_rate(struct clk_hw *hw,
58 unsigned long rate,
59 unsigned long *best_parent_rate,
60 struct clk **best_parent_clk)
84{ 61{
85 struct clk_programmable *prog = to_clk_programmable(hw); 62 struct clk *parent = NULL;
86 struct at91_pmc *pmc = prog->pmc; 63 long best_rate = -EINVAL;
87 64 unsigned long parent_rate;
88 return !!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_PCKR(prog->id)); 65 unsigned long tmp_rate;
89} 66 int shift;
67 int i;
90 68
91static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw, 69 for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
92 unsigned long parent_rate) 70 parent = clk_get_parent_by_index(hw->clk, i);
93{ 71 if (!parent)
94 u32 tmp; 72 continue;
95 struct clk_programmable *prog = to_clk_programmable(hw);
96 struct at91_pmc *pmc = prog->pmc;
97 const struct clk_programmable_layout *layout = prog->layout;
98 73
99 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)); 74 parent_rate = __clk_get_rate(parent);
100 prog->pres = (tmp >> layout->pres_shift) & PROG_PRES_MASK; 75 for (shift = 0; shift < PROG_PRES_MASK; shift++) {
76 tmp_rate = parent_rate >> shift;
77 if (tmp_rate <= rate)
78 break;
79 }
101 80
102 return parent_rate >> prog->pres; 81 if (tmp_rate > rate)
103} 82 continue;
104 83
105static long clk_programmable_round_rate(struct clk_hw *hw, unsigned long rate, 84 if (best_rate < 0 || (rate - tmp_rate) < (rate - best_rate)) {
106 unsigned long *parent_rate) 85 best_rate = tmp_rate;
107{ 86 *best_parent_rate = parent_rate;
108 unsigned long best_rate = *parent_rate; 87 *best_parent_clk = parent;
109 unsigned long best_diff;
110 unsigned long new_diff;
111 unsigned long cur_rate;
112 int shift = shift;
113
114 if (rate > *parent_rate)
115 return *parent_rate;
116 else
117 best_diff = *parent_rate - rate;
118
119 if (!best_diff)
120 return best_rate;
121
122 for (shift = 1; shift < PROG_PRES_MASK; shift++) {
123 cur_rate = *parent_rate >> shift;
124
125 if (cur_rate > rate)
126 new_diff = cur_rate - rate;
127 else
128 new_diff = rate - cur_rate;
129
130 if (!new_diff)
131 return cur_rate;
132
133 if (new_diff < best_diff) {
134 best_diff = new_diff;
135 best_rate = cur_rate;
136 } 88 }
137 89
138 if (rate > cur_rate) 90 if (!best_rate)
139 break; 91 break;
140 } 92 }
141 93
@@ -146,17 +98,22 @@ static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
146{ 98{
147 struct clk_programmable *prog = to_clk_programmable(hw); 99 struct clk_programmable *prog = to_clk_programmable(hw);
148 const struct clk_programmable_layout *layout = prog->layout; 100 const struct clk_programmable_layout *layout = prog->layout;
101 struct at91_pmc *pmc = prog->pmc;
102 u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) & ~layout->css_mask;
103
104 if (layout->have_slck_mck)
105 tmp &= AT91_PMC_CSSMCK_MCK;
106
149 if (index > layout->css_mask) { 107 if (index > layout->css_mask) {
150 if (index > PROG_MAX_RM9200_CSS && layout->have_slck_mck) { 108 if (index > PROG_MAX_RM9200_CSS && layout->have_slck_mck) {
151 prog->css = 0; 109 tmp |= AT91_PMC_CSSMCK_MCK;
152 prog->slckmck = 1;
153 return 0; 110 return 0;
154 } else { 111 } else {
155 return -EINVAL; 112 return -EINVAL;
156 } 113 }
157 } 114 }
158 115
159 prog->css = index; 116 pmc_write(pmc, AT91_PMC_PCKR(prog->id), tmp | index);
160 return 0; 117 return 0;
161} 118}
162 119
@@ -169,13 +126,9 @@ static u8 clk_programmable_get_parent(struct clk_hw *hw)
169 const struct clk_programmable_layout *layout = prog->layout; 126 const struct clk_programmable_layout *layout = prog->layout;
170 127
171 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)); 128 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id));
172 prog->css = tmp & layout->css_mask; 129 ret = tmp & layout->css_mask;
173 ret = prog->css; 130 if (layout->have_slck_mck && (tmp & AT91_PMC_CSSMCK_MCK) && !ret)
174 if (layout->have_slck_mck) { 131 ret = PROG_MAX_RM9200_CSS + 1;
175 prog->slckmck = !!(tmp & AT91_PMC_CSSMCK_MCK);
176 if (prog->slckmck && !ret)
177 ret = PROG_MAX_RM9200_CSS + 1;
178 }
179 132
180 return ret; 133 return ret;
181} 134}
@@ -184,67 +137,47 @@ static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate,
184 unsigned long parent_rate) 137 unsigned long parent_rate)
185{ 138{
186 struct clk_programmable *prog = to_clk_programmable(hw); 139 struct clk_programmable *prog = to_clk_programmable(hw);
187 unsigned long best_rate = parent_rate; 140 struct at91_pmc *pmc = prog->pmc;
188 unsigned long best_diff; 141 const struct clk_programmable_layout *layout = prog->layout;
189 unsigned long new_diff; 142 unsigned long div = parent_rate / rate;
190 unsigned long cur_rate;
191 int shift = 0; 143 int shift = 0;
144 u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) &
145 ~(PROG_PRES_MASK << layout->pres_shift);
192 146
193 if (rate > parent_rate) 147 if (!div)
194 return parent_rate; 148 return -EINVAL;
195 else
196 best_diff = parent_rate - rate;
197
198 if (!best_diff) {
199 prog->pres = shift;
200 return 0;
201 }
202 149
203 for (shift = 1; shift < PROG_PRES_MASK; shift++) { 150 shift = fls(div) - 1;
204 cur_rate = parent_rate >> shift;
205 151
206 if (cur_rate > rate) 152 if (div != (1<<shift))
207 new_diff = cur_rate - rate; 153 return -EINVAL;
208 else
209 new_diff = rate - cur_rate;
210 154
211 if (!new_diff) 155 if (shift >= PROG_PRES_MASK)
212 break; 156 return -EINVAL;
213 157
214 if (new_diff < best_diff) { 158 pmc_write(pmc, AT91_PMC_PCKR(prog->id),
215 best_diff = new_diff; 159 tmp | (shift << layout->pres_shift));
216 best_rate = cur_rate;
217 }
218 160
219 if (rate > cur_rate)
220 break;
221 }
222
223 prog->pres = shift;
224 return 0; 161 return 0;
225} 162}
226 163
227static const struct clk_ops programmable_ops = { 164static const struct clk_ops programmable_ops = {
228 .prepare = clk_programmable_prepare,
229 .is_prepared = clk_programmable_is_ready,
230 .recalc_rate = clk_programmable_recalc_rate, 165 .recalc_rate = clk_programmable_recalc_rate,
231 .round_rate = clk_programmable_round_rate, 166 .determine_rate = clk_programmable_determine_rate,
232 .get_parent = clk_programmable_get_parent, 167 .get_parent = clk_programmable_get_parent,
233 .set_parent = clk_programmable_set_parent, 168 .set_parent = clk_programmable_set_parent,
234 .set_rate = clk_programmable_set_rate, 169 .set_rate = clk_programmable_set_rate,
235}; 170};
236 171
237static struct clk * __init 172static struct clk * __init
238at91_clk_register_programmable(struct at91_pmc *pmc, unsigned int irq, 173at91_clk_register_programmable(struct at91_pmc *pmc,
239 const char *name, const char **parent_names, 174 const char *name, const char **parent_names,
240 u8 num_parents, u8 id, 175 u8 num_parents, u8 id,
241 const struct clk_programmable_layout *layout) 176 const struct clk_programmable_layout *layout)
242{ 177{
243 int ret;
244 struct clk_programmable *prog; 178 struct clk_programmable *prog;
245 struct clk *clk = NULL; 179 struct clk *clk = NULL;
246 struct clk_init_data init; 180 struct clk_init_data init;
247 char irq_name[11];
248 181
249 if (id > PROG_ID_MAX) 182 if (id > PROG_ID_MAX)
250 return ERR_PTR(-EINVAL); 183 return ERR_PTR(-EINVAL);
@@ -263,14 +196,6 @@ at91_clk_register_programmable(struct at91_pmc *pmc, unsigned int irq,
263 prog->layout = layout; 196 prog->layout = layout;
264 prog->hw.init = &init; 197 prog->hw.init = &init;
265 prog->pmc = pmc; 198 prog->pmc = pmc;
266 prog->irq = irq;
267 init_waitqueue_head(&prog->wait);
268 irq_set_status_flags(prog->irq, IRQ_NOAUTOEN);
269 snprintf(irq_name, sizeof(irq_name), "clk-prog%d", id);
270 ret = request_irq(prog->irq, clk_programmable_irq_handler,
271 IRQF_TRIGGER_HIGH, irq_name, prog);
272 if (ret)
273 return ERR_PTR(ret);
274 199
275 clk = clk_register(NULL, &prog->hw); 200 clk = clk_register(NULL, &prog->hw);
276 if (IS_ERR(clk)) 201 if (IS_ERR(clk))
@@ -304,7 +229,6 @@ of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
304 int num; 229 int num;
305 u32 id; 230 u32 id;
306 int i; 231 int i;
307 unsigned int irq;
308 struct clk *clk; 232 struct clk *clk;
309 int num_parents; 233 int num_parents;
310 const char *parent_names[PROG_SOURCE_MAX]; 234 const char *parent_names[PROG_SOURCE_MAX];
@@ -332,11 +256,7 @@ of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
332 if (of_property_read_string(np, "clock-output-names", &name)) 256 if (of_property_read_string(np, "clock-output-names", &name))
333 name = progclknp->name; 257 name = progclknp->name;
334 258
335 irq = irq_of_parse_and_map(progclknp, 0); 259 clk = at91_clk_register_programmable(pmc, name,
336 if (!irq)
337 continue;
338
339 clk = at91_clk_register_programmable(pmc, irq, name,
340 parent_names, num_parents, 260 parent_names, num_parents,
341 id, layout); 261 id, layout);
342 if (IS_ERR(clk)) 262 if (IS_ERR(clk))
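The rework above drops the interrupt/wait-queue plumbing from the programmable clocks and reads the prescaler straight out of AT91_PMC_PCKR; clk_programmable_set_rate() now only accepts rates that are an exact power-of-two division of the parent rate. A minimal standalone sketch of that selection logic follows (plain user-space C, not the kernel code itself; the PROG_PRES_MASK value is assumed for illustration):

/* sketch of the power-of-two prescaler check used by set_rate() above */
#include <stdio.h>

#define PROG_PRES_MASK 0x7      /* assumed width of the PRES field */

static int pck_prescaler(unsigned long parent_rate, unsigned long rate)
{
        unsigned long div;
        int shift = 0;

        if (!rate)
                return -1;

        div = parent_rate / rate;
        if (!div)
                return -1;                      /* requested rate above parent */

        while ((1UL << (shift + 1)) <= div)     /* floor(log2(div)), like fls(div) - 1 */
                shift++;

        if (div != (1UL << shift) || shift >= PROG_PRES_MASK)
                return -1;                      /* not a supported 2^n division */

        return shift;                           /* value for the PCKR PRES field */
}

int main(void)
{
        printf("%d\n", pck_prescaler(132000000, 33000000));    /* 2: divide by 4 */
        printf("%d\n", pck_prescaler(132000000, 44000000));    /* -1: divide-by-3 not supported */
        return 0;
}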
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
index 8f7c0434a09f..8c96307d7363 100644
--- a/drivers/clk/at91/clk-system.c
+++ b/drivers/clk/at91/clk-system.c
@@ -14,6 +14,11 @@
14#include <linux/of.h> 14#include <linux/of.h>
15#include <linux/of_address.h> 15#include <linux/of_address.h>
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/irq.h>
18#include <linux/of_irq.h>
19#include <linux/interrupt.h>
20#include <linux/wait.h>
21#include <linux/sched.h>
17 22
18#include "pmc.h" 23#include "pmc.h"
19 24
@@ -25,19 +30,48 @@
25struct clk_system { 30struct clk_system {
26 struct clk_hw hw; 31 struct clk_hw hw;
27 struct at91_pmc *pmc; 32 struct at91_pmc *pmc;
33 unsigned int irq;
34 wait_queue_head_t wait;
28 u8 id; 35 u8 id;
29}; 36};
30 37
31static int clk_system_enable(struct clk_hw *hw) 38static inline int is_pck(int id)
39{
40 return (id >= 8) && (id <= 15);
41}
42static irqreturn_t clk_system_irq_handler(int irq, void *dev_id)
43{
44 struct clk_system *sys = (struct clk_system *)dev_id;
45
46 wake_up(&sys->wait);
47 disable_irq_nosync(sys->irq);
48
49 return IRQ_HANDLED;
50}
51
52static int clk_system_prepare(struct clk_hw *hw)
32{ 53{
33 struct clk_system *sys = to_clk_system(hw); 54 struct clk_system *sys = to_clk_system(hw);
34 struct at91_pmc *pmc = sys->pmc; 55 struct at91_pmc *pmc = sys->pmc;
56 u32 mask = 1 << sys->id;
35 57
36 pmc_write(pmc, AT91_PMC_SCER, 1 << sys->id); 58 pmc_write(pmc, AT91_PMC_SCER, mask);
59
60 if (!is_pck(sys->id))
61 return 0;
62
63 while (!(pmc_read(pmc, AT91_PMC_SR) & mask)) {
64 if (sys->irq) {
65 enable_irq(sys->irq);
66 wait_event(sys->wait,
67 pmc_read(pmc, AT91_PMC_SR) & mask);
68 } else
69 cpu_relax();
70 }
37 return 0; 71 return 0;
38} 72}
39 73
40static void clk_system_disable(struct clk_hw *hw) 74static void clk_system_unprepare(struct clk_hw *hw)
41{ 75{
42 struct clk_system *sys = to_clk_system(hw); 76 struct clk_system *sys = to_clk_system(hw);
43 struct at91_pmc *pmc = sys->pmc; 77 struct at91_pmc *pmc = sys->pmc;
@@ -45,27 +79,34 @@ static void clk_system_disable(struct clk_hw *hw)
45 pmc_write(pmc, AT91_PMC_SCDR, 1 << sys->id); 79 pmc_write(pmc, AT91_PMC_SCDR, 1 << sys->id);
46} 80}
47 81
48static int clk_system_is_enabled(struct clk_hw *hw) 82static int clk_system_is_prepared(struct clk_hw *hw)
49{ 83{
50 struct clk_system *sys = to_clk_system(hw); 84 struct clk_system *sys = to_clk_system(hw);
51 struct at91_pmc *pmc = sys->pmc; 85 struct at91_pmc *pmc = sys->pmc;
52 86
53 return !!(pmc_read(pmc, AT91_PMC_SCSR) & (1 << sys->id)); 87 if (!(pmc_read(pmc, AT91_PMC_SCSR) & (1 << sys->id)))
88 return 0;
89
90 if (!is_pck(sys->id))
91 return 1;
92
93 return !!(pmc_read(pmc, AT91_PMC_SR) & (1 << sys->id));
54} 94}
55 95
56static const struct clk_ops system_ops = { 96static const struct clk_ops system_ops = {
57 .enable = clk_system_enable, 97 .prepare = clk_system_prepare,
58 .disable = clk_system_disable, 98 .unprepare = clk_system_unprepare,
59 .is_enabled = clk_system_is_enabled, 99 .is_prepared = clk_system_is_prepared,
60}; 100};
61 101
62static struct clk * __init 102static struct clk * __init
63at91_clk_register_system(struct at91_pmc *pmc, const char *name, 103at91_clk_register_system(struct at91_pmc *pmc, const char *name,
64 const char *parent_name, u8 id) 104 const char *parent_name, u8 id, int irq)
65{ 105{
66 struct clk_system *sys; 106 struct clk_system *sys;
67 struct clk *clk = NULL; 107 struct clk *clk = NULL;
68 struct clk_init_data init; 108 struct clk_init_data init;
109 int ret;
69 110
70 if (!parent_name || id > SYSTEM_MAX_ID) 111 if (!parent_name || id > SYSTEM_MAX_ID)
71 return ERR_PTR(-EINVAL); 112 return ERR_PTR(-EINVAL);
@@ -84,11 +125,20 @@ at91_clk_register_system(struct at91_pmc *pmc, const char *name,
84 * (see drivers/memory) which would request and enable the ddrck clock. 125 * (see drivers/memory) which would request and enable the ddrck clock.
85 * When this is done we will be able to remove CLK_IGNORE_UNUSED flag. 126 * When this is done we will be able to remove CLK_IGNORE_UNUSED flag.
86 */ 127 */
87 init.flags = CLK_IGNORE_UNUSED; 128 init.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED;
88 129
89 sys->id = id; 130 sys->id = id;
90 sys->hw.init = &init; 131 sys->hw.init = &init;
91 sys->pmc = pmc; 132 sys->pmc = pmc;
133 sys->irq = irq;
134 if (irq) {
135 init_waitqueue_head(&sys->wait);
136 irq_set_status_flags(sys->irq, IRQ_NOAUTOEN);
137 ret = request_irq(sys->irq, clk_system_irq_handler,
138 IRQF_TRIGGER_HIGH, name, sys);
139 if (ret)
140 return ERR_PTR(ret);
141 }
92 142
93 clk = clk_register(NULL, &sys->hw); 143 clk = clk_register(NULL, &sys->hw);
94 if (IS_ERR(clk)) 144 if (IS_ERR(clk))
@@ -101,6 +151,7 @@ static void __init
101of_at91_clk_sys_setup(struct device_node *np, struct at91_pmc *pmc) 151of_at91_clk_sys_setup(struct device_node *np, struct at91_pmc *pmc)
102{ 152{
103 int num; 153 int num;
154 int irq = 0;
104 u32 id; 155 u32 id;
105 struct clk *clk; 156 struct clk *clk;
106 const char *name; 157 const char *name;
@@ -118,9 +169,12 @@ of_at91_clk_sys_setup(struct device_node *np, struct at91_pmc *pmc)
118 if (of_property_read_string(np, "clock-output-names", &name)) 169 if (of_property_read_string(np, "clock-output-names", &name))
119 name = sysclknp->name; 170 name = sysclknp->name;
120 171
172 if (is_pck(id))
173 irq = irq_of_parse_and_map(sysclknp, 0);
174
121 parent_name = of_clk_get_parent_name(sysclknp, 0); 175 parent_name = of_clk_get_parent_name(sysclknp, 0);
122 176
123 clk = at91_clk_register_system(pmc, name, parent_name, id); 177 clk = at91_clk_register_system(pmc, name, parent_name, id, irq);
124 if (IS_ERR(clk)) 178 if (IS_ERR(clk))
125 continue; 179 continue;
126 180
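With the hunks above, the system-clock ops move from enable/disable to prepare/unprepare: the programmable clock outputs (PCK, ids 8-15) now wait for their ready bit in PMC_SR, sleeping on a wait queue when an interrupt was provided and spinning otherwise, and sleeping is only allowed in the prepare path. Below is a small user-space model of the readiness test, with register values passed in rather than read from the PMC:

/* illustration only, not kernel code */
#include <stdbool.h>
#include <stdio.h>

static bool is_pck(int id)
{
        return id >= 8 && id <= 15;
}

static bool clk_ready(unsigned int scsr, unsigned int sr, int id)
{
        unsigned int mask = 1u << id;

        if (!(scsr & mask))
                return false;           /* not enabled at all */
        if (!is_pck(id))
                return true;            /* non-PCK clocks have no ready bit */
        return sr & mask;               /* PCK: also needs the SR ready bit */
}

int main(void)
{
        printf("%d\n", clk_ready(1u << 9, 0, 9));       /* 0: PCK9 enabled, not yet ready */
        printf("%d\n", clk_ready(1u << 9, 1u << 9, 9)); /* 1: PCK9 ready */
        return 0;
}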
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index 8137327847c3..1127ee46b802 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -17,23 +17,75 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/err.h> 18#include <linux/err.h>
19 19
20#define AXI_CLKGEN_REG_UPDATE_ENABLE 0x04 20#define AXI_CLKGEN_V1_REG_UPDATE_ENABLE 0x04
21#define AXI_CLKGEN_REG_CLK_OUT1 0x08 21#define AXI_CLKGEN_V1_REG_CLK_OUT1 0x08
22#define AXI_CLKGEN_REG_CLK_OUT2 0x0c 22#define AXI_CLKGEN_V1_REG_CLK_OUT2 0x0c
23#define AXI_CLKGEN_REG_CLK_DIV 0x10 23#define AXI_CLKGEN_V1_REG_CLK_DIV 0x10
24#define AXI_CLKGEN_REG_CLK_FB1 0x14 24#define AXI_CLKGEN_V1_REG_CLK_FB1 0x14
25#define AXI_CLKGEN_REG_CLK_FB2 0x18 25#define AXI_CLKGEN_V1_REG_CLK_FB2 0x18
26#define AXI_CLKGEN_REG_LOCK1 0x1c 26#define AXI_CLKGEN_V1_REG_LOCK1 0x1c
27#define AXI_CLKGEN_REG_LOCK2 0x20 27#define AXI_CLKGEN_V1_REG_LOCK2 0x20
28#define AXI_CLKGEN_REG_LOCK3 0x24 28#define AXI_CLKGEN_V1_REG_LOCK3 0x24
29#define AXI_CLKGEN_REG_FILTER1 0x28 29#define AXI_CLKGEN_V1_REG_FILTER1 0x28
30#define AXI_CLKGEN_REG_FILTER2 0x2c 30#define AXI_CLKGEN_V1_REG_FILTER2 0x2c
31
32#define AXI_CLKGEN_V2_REG_RESET 0x40
33#define AXI_CLKGEN_V2_REG_DRP_CNTRL 0x70
34#define AXI_CLKGEN_V2_REG_DRP_STATUS 0x74
35
36#define AXI_CLKGEN_V2_RESET_MMCM_ENABLE BIT(1)
37#define AXI_CLKGEN_V2_RESET_ENABLE BIT(0)
38
39#define AXI_CLKGEN_V2_DRP_CNTRL_SEL BIT(29)
40#define AXI_CLKGEN_V2_DRP_CNTRL_READ BIT(28)
41
42#define AXI_CLKGEN_V2_DRP_STATUS_BUSY BIT(16)
43
44#define MMCM_REG_CLKOUT0_1 0x08
45#define MMCM_REG_CLKOUT0_2 0x09
46#define MMCM_REG_CLK_FB1 0x14
47#define MMCM_REG_CLK_FB2 0x15
48#define MMCM_REG_CLK_DIV 0x16
49#define MMCM_REG_LOCK1 0x18
50#define MMCM_REG_LOCK2 0x19
51#define MMCM_REG_LOCK3 0x1a
52#define MMCM_REG_FILTER1 0x4e
53#define MMCM_REG_FILTER2 0x4f
54
55struct axi_clkgen;
56
57struct axi_clkgen_mmcm_ops {
58 void (*enable)(struct axi_clkgen *axi_clkgen, bool enable);
59 int (*write)(struct axi_clkgen *axi_clkgen, unsigned int reg,
60 unsigned int val, unsigned int mask);
61 int (*read)(struct axi_clkgen *axi_clkgen, unsigned int reg,
62 unsigned int *val);
63};
31 64
32struct axi_clkgen { 65struct axi_clkgen {
33 void __iomem *base; 66 void __iomem *base;
67 const struct axi_clkgen_mmcm_ops *mmcm_ops;
34 struct clk_hw clk_hw; 68 struct clk_hw clk_hw;
35}; 69};
36 70
71static void axi_clkgen_mmcm_enable(struct axi_clkgen *axi_clkgen,
72 bool enable)
73{
74 axi_clkgen->mmcm_ops->enable(axi_clkgen, enable);
75}
76
77static int axi_clkgen_mmcm_write(struct axi_clkgen *axi_clkgen,
78 unsigned int reg, unsigned int val, unsigned int mask)
79{
80 return axi_clkgen->mmcm_ops->write(axi_clkgen, reg, val, mask);
81}
82
83static int axi_clkgen_mmcm_read(struct axi_clkgen *axi_clkgen,
84 unsigned int reg, unsigned int *val)
85{
86 return axi_clkgen->mmcm_ops->read(axi_clkgen, reg, val);
87}
88
37static uint32_t axi_clkgen_lookup_filter(unsigned int m) 89static uint32_t axi_clkgen_lookup_filter(unsigned int m)
38{ 90{
39 switch (m) { 91 switch (m) {
@@ -156,6 +208,148 @@ static void axi_clkgen_read(struct axi_clkgen *axi_clkgen,
156 *val = readl(axi_clkgen->base + reg); 208 *val = readl(axi_clkgen->base + reg);
157} 209}
158 210
211static unsigned int axi_clkgen_v1_map_mmcm_reg(unsigned int reg)
212{
213 switch (reg) {
214 case MMCM_REG_CLKOUT0_1:
215 return AXI_CLKGEN_V1_REG_CLK_OUT1;
216 case MMCM_REG_CLKOUT0_2:
217 return AXI_CLKGEN_V1_REG_CLK_OUT2;
218 case MMCM_REG_CLK_FB1:
219 return AXI_CLKGEN_V1_REG_CLK_FB1;
220 case MMCM_REG_CLK_FB2:
221 return AXI_CLKGEN_V1_REG_CLK_FB2;
222 case MMCM_REG_CLK_DIV:
223 return AXI_CLKGEN_V1_REG_CLK_DIV;
224 case MMCM_REG_LOCK1:
225 return AXI_CLKGEN_V1_REG_LOCK1;
226 case MMCM_REG_LOCK2:
227 return AXI_CLKGEN_V1_REG_LOCK2;
228 case MMCM_REG_LOCK3:
229 return AXI_CLKGEN_V1_REG_LOCK3;
230 case MMCM_REG_FILTER1:
231 return AXI_CLKGEN_V1_REG_FILTER1;
232 case MMCM_REG_FILTER2:
233 return AXI_CLKGEN_V1_REG_FILTER2;
234 default:
235 return 0;
236 }
237}
238
239static int axi_clkgen_v1_mmcm_write(struct axi_clkgen *axi_clkgen,
240 unsigned int reg, unsigned int val, unsigned int mask)
241{
242 reg = axi_clkgen_v1_map_mmcm_reg(reg);
243 if (reg == 0)
244 return -EINVAL;
245
246 axi_clkgen_write(axi_clkgen, reg, val);
247
248 return 0;
249}
250
251static int axi_clkgen_v1_mmcm_read(struct axi_clkgen *axi_clkgen,
252 unsigned int reg, unsigned int *val)
253{
254 reg = axi_clkgen_v1_map_mmcm_reg(reg);
255 if (reg == 0)
256 return -EINVAL;
257
258 axi_clkgen_read(axi_clkgen, reg, val);
259
260 return 0;
261}
262
263static void axi_clkgen_v1_mmcm_enable(struct axi_clkgen *axi_clkgen,
264 bool enable)
265{
266 axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V1_REG_UPDATE_ENABLE, enable);
267}
268
269static const struct axi_clkgen_mmcm_ops axi_clkgen_v1_mmcm_ops = {
270 .write = axi_clkgen_v1_mmcm_write,
271 .read = axi_clkgen_v1_mmcm_read,
272 .enable = axi_clkgen_v1_mmcm_enable,
273};
274
275static int axi_clkgen_wait_non_busy(struct axi_clkgen *axi_clkgen)
276{
277 unsigned int timeout = 10000;
278 unsigned int val;
279
280 do {
281 axi_clkgen_read(axi_clkgen, AXI_CLKGEN_V2_REG_DRP_STATUS, &val);
282 } while ((val & AXI_CLKGEN_V2_DRP_STATUS_BUSY) && --timeout);
283
284 if (val & AXI_CLKGEN_V2_DRP_STATUS_BUSY)
285 return -EIO;
286
287 return val & 0xffff;
288}
289
290static int axi_clkgen_v2_mmcm_read(struct axi_clkgen *axi_clkgen,
291 unsigned int reg, unsigned int *val)
292{
293 unsigned int reg_val;
294 int ret;
295
296 ret = axi_clkgen_wait_non_busy(axi_clkgen);
297 if (ret < 0)
298 return ret;
299
300 reg_val = AXI_CLKGEN_V2_DRP_CNTRL_SEL | AXI_CLKGEN_V2_DRP_CNTRL_READ;
301 reg_val |= (reg << 16);
302
303 axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_DRP_CNTRL, reg_val);
304
305 ret = axi_clkgen_wait_non_busy(axi_clkgen);
306 if (ret < 0)
307 return ret;
308
309 *val = ret;
310
311 return 0;
312}
313
314static int axi_clkgen_v2_mmcm_write(struct axi_clkgen *axi_clkgen,
315 unsigned int reg, unsigned int val, unsigned int mask)
316{
317 unsigned int reg_val = 0;
318 int ret;
319
320 ret = axi_clkgen_wait_non_busy(axi_clkgen);
321 if (ret < 0)
322 return ret;
323
324 if (mask != 0xffff) {
325 axi_clkgen_v2_mmcm_read(axi_clkgen, reg, &reg_val);
326 reg_val &= ~mask;
327 }
328
329 reg_val |= AXI_CLKGEN_V2_DRP_CNTRL_SEL | (reg << 16) | (val & mask);
330
331 axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_DRP_CNTRL, reg_val);
332
333 return 0;
334}
335
336static void axi_clkgen_v2_mmcm_enable(struct axi_clkgen *axi_clkgen,
337 bool enable)
338{
339 unsigned int val = AXI_CLKGEN_V2_RESET_ENABLE;
340
341 if (enable)
342 val |= AXI_CLKGEN_V2_RESET_MMCM_ENABLE;
343
344 axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_RESET, val);
345}
346
347static const struct axi_clkgen_mmcm_ops axi_clkgen_v2_mmcm_ops = {
348 .write = axi_clkgen_v2_mmcm_write,
349 .read = axi_clkgen_v2_mmcm_read,
350 .enable = axi_clkgen_v2_mmcm_enable,
351};
352
159static struct axi_clkgen *clk_hw_to_axi_clkgen(struct clk_hw *clk_hw) 353static struct axi_clkgen *clk_hw_to_axi_clkgen(struct clk_hw *clk_hw)
160{ 354{
161 return container_of(clk_hw, struct axi_clkgen, clk_hw); 355 return container_of(clk_hw, struct axi_clkgen, clk_hw);
@@ -184,33 +378,29 @@ static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
184 filter = axi_clkgen_lookup_filter(m - 1); 378 filter = axi_clkgen_lookup_filter(m - 1);
185 lock = axi_clkgen_lookup_lock(m - 1); 379 lock = axi_clkgen_lookup_lock(m - 1);
186 380
187 axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_UPDATE_ENABLE, 0);
188
189 axi_clkgen_calc_clk_params(dout, &low, &high, &edge, &nocount); 381 axi_clkgen_calc_clk_params(dout, &low, &high, &edge, &nocount);
190 axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_OUT1, 382 axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLKOUT0_1,
191 (high << 6) | low); 383 (high << 6) | low, 0xefff);
192 axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_OUT2, 384 axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLKOUT0_2,
193 (edge << 7) | (nocount << 6)); 385 (edge << 7) | (nocount << 6), 0x03ff);
194 386
195 axi_clkgen_calc_clk_params(d, &low, &high, &edge, &nocount); 387 axi_clkgen_calc_clk_params(d, &low, &high, &edge, &nocount);
196 axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_DIV, 388 axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_DIV,
197 (edge << 13) | (nocount << 12) | (high << 6) | low); 389 (edge << 13) | (nocount << 12) | (high << 6) | low, 0x3fff);
198 390
199 axi_clkgen_calc_clk_params(m, &low, &high, &edge, &nocount); 391 axi_clkgen_calc_clk_params(m, &low, &high, &edge, &nocount);
200 axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_FB1, 392 axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_FB1,
201 (high << 6) | low); 393 (high << 6) | low, 0xefff);
202 axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_FB2, 394 axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_FB2,
203 (edge << 7) | (nocount << 6)); 395 (edge << 7) | (nocount << 6), 0x03ff);
204 396
205 axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_LOCK1, lock & 0x3ff); 397 axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK1, lock & 0x3ff, 0x3ff);
206 axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_LOCK2, 398 axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK2,
207 (((lock >> 16) & 0x1f) << 10) | 0x1); 399 (((lock >> 16) & 0x1f) << 10) | 0x1, 0x7fff);
208 axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_LOCK3, 400 axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK3,
209 (((lock >> 24) & 0x1f) << 10) | 0x3e9); 401 (((lock >> 24) & 0x1f) << 10) | 0x3e9, 0x7fff);
210 axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_FILTER1, filter >> 16); 402 axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_FILTER1, filter >> 16, 0x9900);
211 axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_FILTER2, filter); 403 axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_FILTER2, filter, 0x9900);
212
213 axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_UPDATE_ENABLE, 1);
214 404
215 return 0; 405 return 0;
216} 406}
@@ -236,11 +426,11 @@ static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
236 unsigned int reg; 426 unsigned int reg;
237 unsigned long long tmp; 427 unsigned long long tmp;
238 428
239 axi_clkgen_read(axi_clkgen, AXI_CLKGEN_REG_CLK_OUT1, &reg); 429 axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLKOUT0_1, &reg);
240 dout = (reg & 0x3f) + ((reg >> 6) & 0x3f); 430 dout = (reg & 0x3f) + ((reg >> 6) & 0x3f);
241 axi_clkgen_read(axi_clkgen, AXI_CLKGEN_REG_CLK_DIV, &reg); 431 axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_DIV, &reg);
242 d = (reg & 0x3f) + ((reg >> 6) & 0x3f); 432 d = (reg & 0x3f) + ((reg >> 6) & 0x3f);
243 axi_clkgen_read(axi_clkgen, AXI_CLKGEN_REG_CLK_FB1, &reg); 433 axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_FB1, &reg);
244 m = (reg & 0x3f) + ((reg >> 6) & 0x3f); 434 m = (reg & 0x3f) + ((reg >> 6) & 0x3f);
245 435
246 if (d == 0 || dout == 0) 436 if (d == 0 || dout == 0)
@@ -255,14 +445,45 @@ static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
255 return tmp; 445 return tmp;
256} 446}
257 447
448static int axi_clkgen_enable(struct clk_hw *clk_hw)
449{
450 struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
451
452 axi_clkgen_mmcm_enable(axi_clkgen, true);
453
454 return 0;
455}
456
457static void axi_clkgen_disable(struct clk_hw *clk_hw)
458{
459 struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
460
461 axi_clkgen_mmcm_enable(axi_clkgen, false);
462}
463
258static const struct clk_ops axi_clkgen_ops = { 464static const struct clk_ops axi_clkgen_ops = {
259 .recalc_rate = axi_clkgen_recalc_rate, 465 .recalc_rate = axi_clkgen_recalc_rate,
260 .round_rate = axi_clkgen_round_rate, 466 .round_rate = axi_clkgen_round_rate,
261 .set_rate = axi_clkgen_set_rate, 467 .set_rate = axi_clkgen_set_rate,
468 .enable = axi_clkgen_enable,
469 .disable = axi_clkgen_disable,
262}; 470};
263 471
472static const struct of_device_id axi_clkgen_ids[] = {
473 {
474 .compatible = "adi,axi-clkgen-1.00.a",
475 .data = &axi_clkgen_v1_mmcm_ops
476 }, {
477 .compatible = "adi,axi-clkgen-2.00.a",
478 .data = &axi_clkgen_v2_mmcm_ops,
479 },
480 { },
481};
482MODULE_DEVICE_TABLE(of, axi_clkgen_ids);
483
264static int axi_clkgen_probe(struct platform_device *pdev) 484static int axi_clkgen_probe(struct platform_device *pdev)
265{ 485{
486 const struct of_device_id *id;
266 struct axi_clkgen *axi_clkgen; 487 struct axi_clkgen *axi_clkgen;
267 struct clk_init_data init; 488 struct clk_init_data init;
268 const char *parent_name; 489 const char *parent_name;
@@ -270,10 +491,19 @@ static int axi_clkgen_probe(struct platform_device *pdev)
270 struct resource *mem; 491 struct resource *mem;
271 struct clk *clk; 492 struct clk *clk;
272 493
494 if (!pdev->dev.of_node)
495 return -ENODEV;
496
497 id = of_match_node(axi_clkgen_ids, pdev->dev.of_node);
498 if (!id)
499 return -ENODEV;
500
273 axi_clkgen = devm_kzalloc(&pdev->dev, sizeof(*axi_clkgen), GFP_KERNEL); 501 axi_clkgen = devm_kzalloc(&pdev->dev, sizeof(*axi_clkgen), GFP_KERNEL);
274 if (!axi_clkgen) 502 if (!axi_clkgen)
275 return -ENOMEM; 503 return -ENOMEM;
276 504
505 axi_clkgen->mmcm_ops = id->data;
506
277 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 507 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
278 axi_clkgen->base = devm_ioremap_resource(&pdev->dev, mem); 508 axi_clkgen->base = devm_ioremap_resource(&pdev->dev, mem);
279 if (IS_ERR(axi_clkgen->base)) 509 if (IS_ERR(axi_clkgen->base))
@@ -289,10 +519,12 @@ static int axi_clkgen_probe(struct platform_device *pdev)
289 519
290 init.name = clk_name; 520 init.name = clk_name;
291 init.ops = &axi_clkgen_ops; 521 init.ops = &axi_clkgen_ops;
292 init.flags = 0; 522 init.flags = CLK_SET_RATE_GATE;
293 init.parent_names = &parent_name; 523 init.parent_names = &parent_name;
294 init.num_parents = 1; 524 init.num_parents = 1;
295 525
526 axi_clkgen_mmcm_enable(axi_clkgen, false);
527
296 axi_clkgen->clk_hw.init = &init; 528 axi_clkgen->clk_hw.init = &init;
297 clk = devm_clk_register(&pdev->dev, &axi_clkgen->clk_hw); 529 clk = devm_clk_register(&pdev->dev, &axi_clkgen->clk_hw);
298 if (IS_ERR(clk)) 530 if (IS_ERR(clk))
@@ -309,12 +541,6 @@ static int axi_clkgen_remove(struct platform_device *pdev)
309 return 0; 541 return 0;
310} 542}
311 543
312static const struct of_device_id axi_clkgen_ids[] = {
313 { .compatible = "adi,axi-clkgen-1.00.a" },
314 { },
315};
316MODULE_DEVICE_TABLE(of, axi_clkgen_ids);
317
318static struct platform_driver axi_clkgen_driver = { 544static struct platform_driver axi_clkgen_driver = {
319 .driver = { 545 .driver = {
320 .name = "adi-axi-clkgen", 546 .name = "adi-axi-clkgen",
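The axi-clkgen changes above funnel every MMCM register access through a per-revision ops table, chosen in probe() from the of_match_node() data, so the rate-programming code is identical for the memory-mapped v1 core and the DRP-based v2 core. A compilable sketch of that dispatch pattern follows; the struct and function names are invented for the illustration and are not the driver's symbols:

#include <stdio.h>

struct dummy_clkgen;

struct dummy_mmcm_ops {
        int (*write)(struct dummy_clkgen *cg, unsigned int reg,
                     unsigned int val, unsigned int mask);
};

struct dummy_clkgen {
        const struct dummy_mmcm_ops *ops;
};

static int v1_write(struct dummy_clkgen *cg, unsigned int reg,
                    unsigned int val, unsigned int mask)
{
        printf("v1: direct write reg 0x%02x val 0x%04x\n", reg, val & mask);
        return 0;
}

static int v2_write(struct dummy_clkgen *cg, unsigned int reg,
                    unsigned int val, unsigned int mask)
{
        printf("v2: DRP write reg 0x%02x val 0x%04x\n", reg, val & mask);
        return 0;
}

static const struct dummy_mmcm_ops v1_ops = { .write = v1_write };
static const struct dummy_mmcm_ops v2_ops = { .write = v2_write };

/* the common rate-setting code only ever goes through this helper */
static int mmcm_write(struct dummy_clkgen *cg, unsigned int reg,
                      unsigned int val, unsigned int mask)
{
        return cg->ops->write(cg, reg, val, mask);
}

int main(void)
{
        struct dummy_clkgen v1 = { .ops = &v1_ops };
        struct dummy_clkgen v2 = { .ops = &v2_ops };

        mmcm_write(&v1, 0x08, 0x1041, 0xefff);  /* same call site...          */
        mmcm_write(&v2, 0x08, 0x1041, 0xefff);  /* ...different access method */
        return 0;
}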
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 5543b7df8e16..ec22112e569f 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -24,7 +24,7 @@
24 * Traits of this clock: 24 * Traits of this clock:
25 * prepare - clk_prepare only ensures that parents are prepared 25 * prepare - clk_prepare only ensures that parents are prepared
26 * enable - clk_enable only ensures that parents are enabled 26 * enable - clk_enable only ensures that parents are enabled
27 * rate - rate is adjustable. clk->rate = parent->rate / divisor 27 * rate - rate is adjustable. clk->rate = DIV_ROUND_UP(parent->rate / divisor)
28 * parent - fixed parent. No clk_set_parent support 28 * parent - fixed parent. No clk_set_parent support
29 */ 29 */
30 30
@@ -115,7 +115,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
115 return parent_rate; 115 return parent_rate;
116 } 116 }
117 117
118 return parent_rate / div; 118 return DIV_ROUND_UP(parent_rate, div);
119} 119}
120 120
121/* 121/*
@@ -185,7 +185,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
185 } 185 }
186 parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 186 parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
187 MULT_ROUND_UP(rate, i)); 187 MULT_ROUND_UP(rate, i));
188 now = parent_rate / i; 188 now = DIV_ROUND_UP(parent_rate, i);
189 if (now <= rate && now > best) { 189 if (now <= rate && now > best) {
190 bestdiv = i; 190 bestdiv = i;
191 best = now; 191 best = now;
@@ -207,7 +207,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
207 int div; 207 int div;
208 div = clk_divider_bestdiv(hw, rate, prate); 208 div = clk_divider_bestdiv(hw, rate, prate);
209 209
210 return *prate / div; 210 return DIV_ROUND_UP(*prate, div);
211} 211}
212 212
213static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, 213static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -218,7 +218,7 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
218 unsigned long flags = 0; 218 unsigned long flags = 0;
219 u32 val; 219 u32 val;
220 220
221 div = parent_rate / rate; 221 div = DIV_ROUND_UP(parent_rate, rate);
222 value = _get_val(divider, div); 222 value = _get_val(divider, div);
223 223
224 if (value > div_mask(divider)) 224 if (value > div_mask(divider))
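The clk-divider hunks switch every rate computation from truncating division to DIV_ROUND_UP, so that recalc_rate, round_rate and set_rate share one convention when parent_rate is not an exact multiple of the divider. The arithmetic below (plain C) shows the difference for a 10 MHz parent divided by 3, and why feeding a truncated rate back into a rounded-up set_rate would land on the wrong divider:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long parent = 10000000;        /* 10 MHz parent clock */

        printf("%lu\n", parent / 3);                    /* 3333333: truncated rate  */
        printf("%lu\n", DIV_ROUND_UP(parent, 3));       /* 3333334: rounded-up rate */

        /* the rounded-up rate maps back to the same divider... */
        printf("%lu\n", DIV_ROUND_UP(parent, 3333334)); /* 3 */
        /* ...while the truncated one would select divide-by-4 */
        printf("%lu\n", DIV_ROUND_UP(parent, 3333333)); /* 4 */
        return 0;
}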
diff --git a/drivers/clk/clk-moxart.c b/drivers/clk/clk-moxart.c
new file mode 100644
index 000000000000..30a3b6999e10
--- /dev/null
+++ b/drivers/clk/clk-moxart.c
@@ -0,0 +1,97 @@
1/*
2 * MOXA ART SoCs clock driver.
3 *
4 * Copyright (C) 2013 Jonas Jensen
5 *
6 * Jonas Jensen <jonas.jensen@gmail.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#include <linux/clk-provider.h>
14#include <linux/io.h>
15#include <linux/of_address.h>
16#include <linux/clkdev.h>
17
18void __init moxart_of_pll_clk_init(struct device_node *node)
19{
20 static void __iomem *base;
21 struct clk *clk, *ref_clk;
22 unsigned int mul;
23 const char *name = node->name;
24 const char *parent_name;
25
26 of_property_read_string(node, "clock-output-names", &name);
27 parent_name = of_clk_get_parent_name(node, 0);
28
29 base = of_iomap(node, 0);
30 if (!base) {
31 pr_err("%s: of_iomap failed\n", node->full_name);
32 return;
33 }
34
35 mul = readl(base + 0x30) >> 3 & 0x3f;
36 iounmap(base);
37
38 ref_clk = of_clk_get(node, 0);
39 if (IS_ERR(ref_clk)) {
40 pr_err("%s: of_clk_get failed\n", node->full_name);
41 return;
42 }
43
44 clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mul, 1);
45 if (IS_ERR(clk)) {
46 pr_err("%s: failed to register clock\n", node->full_name);
47 return;
48 }
49
50 clk_register_clkdev(clk, NULL, name);
51 of_clk_add_provider(node, of_clk_src_simple_get, clk);
52}
53CLK_OF_DECLARE(moxart_pll_clock, "moxa,moxart-pll-clock",
54 moxart_of_pll_clk_init);
55
56void __init moxart_of_apb_clk_init(struct device_node *node)
57{
58 static void __iomem *base;
59 struct clk *clk, *pll_clk;
60 unsigned int div, val;
61 unsigned int div_idx[] = { 2, 3, 4, 6, 8};
62 const char *name = node->name;
63 const char *parent_name;
64
65 of_property_read_string(node, "clock-output-names", &name);
66 parent_name = of_clk_get_parent_name(node, 0);
67
68 base = of_iomap(node, 0);
69 if (!base) {
70 pr_err("%s: of_iomap failed\n", node->full_name);
71 return;
72 }
73
74 val = readl(base + 0xc) >> 4 & 0x7;
75 iounmap(base);
76
77 if (val > 4)
78 val = 0;
79 div = div_idx[val] * 2;
80
81 pll_clk = of_clk_get(node, 0);
82 if (IS_ERR(pll_clk)) {
83 pr_err("%s: of_clk_get failed\n", node->full_name);
84 return;
85 }
86
87 clk = clk_register_fixed_factor(NULL, name, parent_name, 0, 1, div);
88 if (IS_ERR(clk)) {
89 pr_err("%s: failed to register clock\n", node->full_name);
90 return;
91 }
92
93 clk_register_clkdev(clk, NULL, name);
94 of_clk_add_provider(node, of_clk_src_simple_get, clk);
95}
96CLK_OF_DECLARE(moxart_apb_clock, "moxa,moxart-apb-clock",
97 moxart_of_apb_clk_init);
diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c
index c4f76ed914b0..8b284be4efa4 100644
--- a/drivers/clk/clk-ppc-corenet.c
+++ b/drivers/clk/clk-ppc-corenet.c
@@ -27,7 +27,6 @@ struct cmux_clk {
27#define CLKSEL_ADJUST BIT(0) 27#define CLKSEL_ADJUST BIT(0)
28#define to_cmux_clk(p) container_of(p, struct cmux_clk, hw) 28#define to_cmux_clk(p) container_of(p, struct cmux_clk, hw)
29 29
30static void __iomem *base;
31static unsigned int clocks_per_pll; 30static unsigned int clocks_per_pll;
32 31
33static int cmux_set_parent(struct clk_hw *hw, u8 idx) 32static int cmux_set_parent(struct clk_hw *hw, u8 idx)
@@ -100,7 +99,11 @@ static void __init core_mux_init(struct device_node *np)
100 pr_err("%s: could not allocate cmux_clk\n", __func__); 99 pr_err("%s: could not allocate cmux_clk\n", __func__);
101 goto err_name; 100 goto err_name;
102 } 101 }
103 cmux_clk->reg = base + offset; 102 cmux_clk->reg = of_iomap(np, 0);
103 if (!cmux_clk->reg) {
104 pr_err("%s: could not map register\n", __func__);
105 goto err_clk;
106 }
104 107
105 node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen"); 108 node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen");
106 if (node && (offset >= 0x80)) 109 if (node && (offset >= 0x80))
@@ -143,38 +146,39 @@ err_name:
143 146
144static void __init core_pll_init(struct device_node *np) 147static void __init core_pll_init(struct device_node *np)
145{ 148{
146 u32 offset, mult; 149 u32 mult;
147 int i, rc, count; 150 int i, rc, count;
148 const char *clk_name, *parent_name; 151 const char *clk_name, *parent_name;
149 struct clk_onecell_data *onecell_data; 152 struct clk_onecell_data *onecell_data;
150 struct clk **subclks; 153 struct clk **subclks;
154 void __iomem *base;
151 155
152 rc = of_property_read_u32(np, "reg", &offset); 156 base = of_iomap(np, 0);
153 if (rc) { 157 if (!base) {
154 pr_err("%s: could not get reg property\n", np->name); 158 pr_err("clk-ppc: iomap error\n");
155 return; 159 return;
156 } 160 }
157 161
158 /* get the multiple of PLL */ 162 /* get the multiple of PLL */
159 mult = ioread32be(base + offset); 163 mult = ioread32be(base);
160 164
161 /* check if this PLL is disabled */ 165 /* check if this PLL is disabled */
162 if (mult & PLL_KILL) { 166 if (mult & PLL_KILL) {
163 pr_debug("PLL:%s is disabled\n", np->name); 167 pr_debug("PLL:%s is disabled\n", np->name);
164 return; 168 goto err_map;
165 } 169 }
166 mult = (mult >> 1) & 0x3f; 170 mult = (mult >> 1) & 0x3f;
167 171
168 parent_name = of_clk_get_parent_name(np, 0); 172 parent_name = of_clk_get_parent_name(np, 0);
169 if (!parent_name) { 173 if (!parent_name) {
170 pr_err("PLL: %s must have a parent\n", np->name); 174 pr_err("PLL: %s must have a parent\n", np->name);
171 return; 175 goto err_map;
172 } 176 }
173 177
174 count = of_property_count_strings(np, "clock-output-names"); 178 count = of_property_count_strings(np, "clock-output-names");
175 if (count < 0 || count > 4) { 179 if (count < 0 || count > 4) {
176 pr_err("%s: clock is not supported\n", np->name); 180 pr_err("%s: clock is not supported\n", np->name);
177 return; 181 goto err_map;
178 } 182 }
179 183
180 /* output clock number per PLL */ 184 /* output clock number per PLL */
@@ -183,7 +187,7 @@ static void __init core_pll_init(struct device_node *np)
183 subclks = kzalloc(sizeof(struct clk *) * count, GFP_KERNEL); 187 subclks = kzalloc(sizeof(struct clk *) * count, GFP_KERNEL);
184 if (!subclks) { 188 if (!subclks) {
185 pr_err("%s: could not allocate subclks\n", __func__); 189 pr_err("%s: could not allocate subclks\n", __func__);
186 return; 190 goto err_map;
187 } 191 }
188 192
189 onecell_data = kzalloc(sizeof(struct clk_onecell_data), GFP_KERNEL); 193 onecell_data = kzalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
@@ -230,30 +234,52 @@ static void __init core_pll_init(struct device_node *np)
230 goto err_cell; 234 goto err_cell;
231 } 235 }
232 236
237 iounmap(base);
233 return; 238 return;
234err_cell: 239err_cell:
235 kfree(onecell_data); 240 kfree(onecell_data);
236err_clks: 241err_clks:
237 kfree(subclks); 242 kfree(subclks);
243err_map:
244 iounmap(base);
245}
246
247static void __init sysclk_init(struct device_node *node)
248{
249 struct clk *clk;
250 const char *clk_name = node->name;
251 struct device_node *np = of_get_parent(node);
252 u32 rate;
253
254 if (!np) {
255 pr_err("ppc-clk: could not get parent node\n");
256 return;
257 }
258
259 if (of_property_read_u32(np, "clock-frequency", &rate)) {
260 of_node_put(node);
261 return;
262 }
263
264 of_property_read_string(np, "clock-output-names", &clk_name);
265
266 clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate);
267 if (!IS_ERR(clk))
268 of_clk_add_provider(np, of_clk_src_simple_get, clk);
238} 269}
239 270
240static const struct of_device_id clk_match[] __initconst = { 271static const struct of_device_id clk_match[] __initconst = {
241 { .compatible = "fixed-clock", .data = of_fixed_clk_setup, }, 272 { .compatible = "fsl,qoriq-sysclk-1.0", .data = sysclk_init, },
242 { .compatible = "fsl,core-pll-clock", .data = core_pll_init, }, 273 { .compatible = "fsl,qoriq-sysclk-2.0", .data = sysclk_init, },
243 { .compatible = "fsl,core-mux-clock", .data = core_mux_init, }, 274 { .compatible = "fsl,qoriq-core-pll-1.0", .data = core_pll_init, },
275 { .compatible = "fsl,qoriq-core-pll-2.0", .data = core_pll_init, },
276 { .compatible = "fsl,qoriq-core-mux-1.0", .data = core_mux_init, },
277 { .compatible = "fsl,qoriq-core-mux-2.0", .data = core_mux_init, },
244 {} 278 {}
245}; 279};
246 280
247static int __init ppc_corenet_clk_probe(struct platform_device *pdev) 281static int __init ppc_corenet_clk_probe(struct platform_device *pdev)
248{ 282{
249 struct device_node *np;
250
251 np = pdev->dev.of_node;
252 base = of_iomap(np, 0);
253 if (!base) {
254 dev_err(&pdev->dev, "iomap error\n");
255 return -ENOMEM;
256 }
257 of_clk_init(clk_match); 283 of_clk_init(clk_match);
258 284
259 return 0; 285 return 0;
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 00a3abe103a5..f2f62a1bf61a 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -27,6 +27,7 @@
27#include <linux/clk-provider.h> 27#include <linux/clk-provider.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/mfd/samsung/s2mps11.h> 29#include <linux/mfd/samsung/s2mps11.h>
30#include <linux/mfd/samsung/s5m8767.h>
30#include <linux/mfd/samsung/core.h> 31#include <linux/mfd/samsung/core.h>
31 32
32#define s2mps11_name(a) (a->hw.init->name) 33#define s2mps11_name(a) (a->hw.init->name)
@@ -48,6 +49,7 @@ struct s2mps11_clk {
48 struct clk_lookup *lookup; 49 struct clk_lookup *lookup;
49 u32 mask; 50 u32 mask;
50 bool enabled; 51 bool enabled;
52 unsigned int reg;
51}; 53};
52 54
53static struct s2mps11_clk *to_s2mps11_clk(struct clk_hw *hw) 55static struct s2mps11_clk *to_s2mps11_clk(struct clk_hw *hw)
@@ -61,7 +63,7 @@ static int s2mps11_clk_prepare(struct clk_hw *hw)
61 int ret; 63 int ret;
62 64
63 ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, 65 ret = regmap_update_bits(s2mps11->iodev->regmap_pmic,
64 S2MPS11_REG_RTC_CTRL, 66 s2mps11->reg,
65 s2mps11->mask, s2mps11->mask); 67 s2mps11->mask, s2mps11->mask);
66 if (!ret) 68 if (!ret)
67 s2mps11->enabled = true; 69 s2mps11->enabled = true;
@@ -74,7 +76,7 @@ static void s2mps11_clk_unprepare(struct clk_hw *hw)
74 struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw); 76 struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
75 int ret; 77 int ret;
76 78
77 ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL, 79 ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, s2mps11->reg,
78 s2mps11->mask, ~s2mps11->mask); 80 s2mps11->mask, ~s2mps11->mask);
79 81
80 if (!ret) 82 if (!ret)
@@ -130,9 +132,9 @@ static struct device_node *s2mps11_clk_parse_dt(struct platform_device *pdev)
130 int i; 132 int i;
131 133
132 if (!iodev->dev->of_node) 134 if (!iodev->dev->of_node)
133 return NULL; 135 return ERR_PTR(-EINVAL);
134 136
135 clk_np = of_find_node_by_name(iodev->dev->of_node, "clocks"); 137 clk_np = of_get_child_by_name(iodev->dev->of_node, "clocks");
136 if (!clk_np) { 138 if (!clk_np) {
137 dev_err(&pdev->dev, "could not find clock sub-node\n"); 139 dev_err(&pdev->dev, "could not find clock sub-node\n");
138 return ERR_PTR(-EINVAL); 140 return ERR_PTR(-EINVAL);
@@ -155,6 +157,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
155 struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent); 157 struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
156 struct s2mps11_clk *s2mps11_clks, *s2mps11_clk; 158 struct s2mps11_clk *s2mps11_clks, *s2mps11_clk;
157 struct device_node *clk_np = NULL; 159 struct device_node *clk_np = NULL;
160 unsigned int s2mps11_reg;
158 int i, ret = 0; 161 int i, ret = 0;
159 u32 val; 162 u32 val;
160 163
@@ -169,13 +172,26 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
169 if (IS_ERR(clk_np)) 172 if (IS_ERR(clk_np))
170 return PTR_ERR(clk_np); 173 return PTR_ERR(clk_np);
171 174
175 switch(platform_get_device_id(pdev)->driver_data) {
176 case S2MPS11X:
177 s2mps11_reg = S2MPS11_REG_RTC_CTRL;
178 break;
179 case S5M8767X:
180 s2mps11_reg = S5M8767_REG_CTRL1;
181 break;
182 default:
183 dev_err(&pdev->dev, "Invalid device type\n");
184 return -EINVAL;
185 };
186
172 for (i = 0; i < S2MPS11_CLKS_NUM; i++, s2mps11_clk++) { 187 for (i = 0; i < S2MPS11_CLKS_NUM; i++, s2mps11_clk++) {
173 s2mps11_clk->iodev = iodev; 188 s2mps11_clk->iodev = iodev;
174 s2mps11_clk->hw.init = &s2mps11_clks_init[i]; 189 s2mps11_clk->hw.init = &s2mps11_clks_init[i];
175 s2mps11_clk->mask = 1 << i; 190 s2mps11_clk->mask = 1 << i;
191 s2mps11_clk->reg = s2mps11_reg;
176 192
177 ret = regmap_read(s2mps11_clk->iodev->regmap_pmic, 193 ret = regmap_read(s2mps11_clk->iodev->regmap_pmic,
178 S2MPS11_REG_RTC_CTRL, &val); 194 s2mps11_clk->reg, &val);
179 if (ret < 0) 195 if (ret < 0)
180 goto err_reg; 196 goto err_reg;
181 197
@@ -241,7 +257,8 @@ static int s2mps11_clk_remove(struct platform_device *pdev)
241} 257}
242 258
243static const struct platform_device_id s2mps11_clk_id[] = { 259static const struct platform_device_id s2mps11_clk_id[] = {
244 { "s2mps11-clk", 0}, 260 { "s2mps11-clk", S2MPS11X},
261 { "s5m8767-clk", S5M8767X},
245 { }, 262 { },
246}; 263};
247MODULE_DEVICE_TABLE(platform, s2mps11_clk_id); 264MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
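The s2mps11 changes above let one driver serve both the S2MPS11 and S5M8767 PMICs: the platform_device_id table's driver_data encodes the variant, and probe() uses it to pick which control register holds the 32 kHz clock enable bits. A minimal model of that selection (the register values below are placeholders, not the real PMIC register map):

#include <stdio.h>

enum pmic_variant { S2MPS11X, S5M8767X };

#define REG_S2MPS11_RTC_CTRL    0x60    /* placeholder offset */
#define REG_S5M8767_CTRL1       0x5a    /* placeholder offset */

static int ctrl_reg_for(enum pmic_variant v)
{
        switch (v) {
        case S2MPS11X:
                return REG_S2MPS11_RTC_CTRL;
        case S5M8767X:
                return REG_S5M8767_CTRL1;
        default:
                return -1;              /* unknown device type */
        }
}

int main(void)
{
        printf("s5m8767 control register: 0x%02x\n", ctrl_reg_for(S5M8767X));
        return 0;
}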
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index c42e608af6bb..dff0373f53c1 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -277,6 +277,10 @@ static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
277 if (!d) 277 if (!d)
278 goto err_out; 278 goto err_out;
279 279
280 if (clk->ops->debug_init)
281 if (clk->ops->debug_init(clk->hw, clk->dentry))
282 goto err_out;
283
280 ret = 0; 284 ret = 0;
281 goto out; 285 goto out;
282 286
@@ -1339,8 +1343,11 @@ static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
1339 if (clk->notifier_count) 1343 if (clk->notifier_count)
1340 ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate); 1344 ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);
1341 1345
1342 if (ret & NOTIFY_STOP_MASK) 1346 if (ret & NOTIFY_STOP_MASK) {
1347 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1348 __func__, clk->name, ret);
1343 goto out; 1349 goto out;
1350 }
1344 1351
1345 hlist_for_each_entry(child, &clk->children, child_node) { 1352 hlist_for_each_entry(child, &clk->children, child_node) {
1346 ret = __clk_speculate_rates(child, new_rate); 1353 ret = __clk_speculate_rates(child, new_rate);
@@ -1588,7 +1595,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
1588 /* notify that we are about to change rates */ 1595 /* notify that we are about to change rates */
1589 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE); 1596 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
1590 if (fail_clk) { 1597 if (fail_clk) {
1591 pr_warn("%s: failed to set %s rate\n", __func__, 1598 pr_debug("%s: failed to set %s rate\n", __func__,
1592 fail_clk->name); 1599 fail_clk->name);
1593 clk_propagate_rate_change(top, ABORT_RATE_CHANGE); 1600 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
1594 ret = -EBUSY; 1601 ret = -EBUSY;
@@ -2260,20 +2267,11 @@ void __clk_put(struct clk *clk)
2260 * re-enter into the clk framework by calling any top-level clk APIs; 2267 * re-enter into the clk framework by calling any top-level clk APIs;
2261 * this will cause a nested prepare_lock mutex. 2268 * this will cause a nested prepare_lock mutex.
2262 * 2269 *
2263 * Pre-change notifier callbacks will be passed the current, pre-change 2270 * In all notification cases cases (pre, post and abort rate change) the
2264 * rate of the clk via struct clk_notifier_data.old_rate. The new, 2271 * original clock rate is passed to the callback via struct
2265 * post-change rate of the clk is passed via struct 2272 * clk_notifier_data.old_rate and the new frequency is passed via struct
2266 * clk_notifier_data.new_rate.
2267 *
2268 * Post-change notifiers will pass the now-current, post-change rate of
2269 * the clk in both struct clk_notifier_data.old_rate and struct
2270 * clk_notifier_data.new_rate. 2273 * clk_notifier_data.new_rate.
2271 * 2274 *
2272 * Abort-change notifiers are effectively the opposite of pre-change
2273 * notifiers: the original pre-change clk rate is passed in via struct
2274 * clk_notifier_data.new_rate and the failed post-change rate is passed
2275 * in via struct clk_notifier_data.old_rate.
2276 *
2277 * clk_notifier_register() must be called from non-atomic context. 2275 * clk_notifier_register() must be called from non-atomic context.
2278 * Returns -EINVAL if called with null arguments, -ENOMEM upon 2276 * Returns -EINVAL if called with null arguments, -ENOMEM upon
2279 * allocation failure; otherwise, passes along the return value of 2277 * allocation failure; otherwise, passes along the return value of
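
To make the clarified old_rate/new_rate semantics concrete, a minimal consumer-side notifier sketch follows (not part of this commit); the "foo" driver and its 100 MHz limit are hypothetical.

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

#define FOO_MAX_RATE	100000000UL	/* hypothetical hardware limit */

static int foo_clk_notify(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	struct clk_notifier_data *ndata = data;

	/* old_rate is always the original rate, new_rate the requested one */
	if (event == PRE_RATE_CHANGE && ndata->new_rate > FOO_MAX_RATE)
		return NOTIFY_BAD;	/* aborts the rate change */

	if (event == POST_RATE_CHANGE)
		pr_info("foo: rate changed from %lu to %lu\n",
			ndata->old_rate, ndata->new_rate);

	return NOTIFY_OK;
}

static struct notifier_block foo_clk_nb = {
	.notifier_call = foo_clk_notify,
};

/* from the consumer's probe(): ret = clk_notifier_register(clk, &foo_clk_nb); */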
@@ -2473,7 +2471,7 @@ EXPORT_SYMBOL_GPL(of_clk_del_provider);
2473struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec) 2471struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
2474{ 2472{
2475 struct of_clk_provider *provider; 2473 struct of_clk_provider *provider;
2476 struct clk *clk = ERR_PTR(-ENOENT); 2474 struct clk *clk = ERR_PTR(-EPROBE_DEFER);
2477 2475
2478 /* Check if we have such a provider in our array */ 2476 /* Check if we have such a provider in our array */
2479 list_for_each_entry(provider, &of_clk_providers, link) { 2477 list_for_each_entry(provider, &of_clk_providers, link) {
@@ -2506,8 +2504,12 @@ EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
2506const char *of_clk_get_parent_name(struct device_node *np, int index) 2504const char *of_clk_get_parent_name(struct device_node *np, int index)
2507{ 2505{
2508 struct of_phandle_args clkspec; 2506 struct of_phandle_args clkspec;
2507 struct property *prop;
2509 const char *clk_name; 2508 const char *clk_name;
2509 const __be32 *vp;
2510 u32 pv;
2510 int rc; 2511 int rc;
2512 int count;
2511 2513
2512 if (index < 0) 2514 if (index < 0)
2513 return NULL; 2515 return NULL;
@@ -2517,8 +2519,22 @@ const char *of_clk_get_parent_name(struct device_node *np, int index)
2517 if (rc) 2519 if (rc)
2518 return NULL; 2520 return NULL;
2519 2521
2522 index = clkspec.args_count ? clkspec.args[0] : 0;
2523 count = 0;
2524
2525 /* if there is a clock-indices property, use it to translate the
2526 * specified index into an array offset for the clock-output-names property.
2527 */
2528 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
2529 if (index == pv) {
2530 index = count;
2531 break;
2532 }
2533 count++;
2534 }
2535
2520 if (of_property_read_string_index(clkspec.np, "clock-output-names", 2536 if (of_property_read_string_index(clkspec.np, "clock-output-names",
2521 clkspec.args_count ? clkspec.args[0] : 0, 2537 index,
2522 &clk_name) < 0) 2538 &clk_name) < 0)
2523 clk_name = clkspec.np->name; 2539 clk_name = clkspec.np->name;
2524 2540
@@ -2527,24 +2543,99 @@ const char *of_clk_get_parent_name(struct device_node *np, int index)
2527} 2543}
2528EXPORT_SYMBOL_GPL(of_clk_get_parent_name); 2544EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
2529 2545
2546struct clock_provider {
2547 of_clk_init_cb_t clk_init_cb;
2548 struct device_node *np;
2549 struct list_head node;
2550};
2551
2552static LIST_HEAD(clk_provider_list);
2553
2554/*
2555 * This function looks for a parent clock. If there is one, then it
2556 * checks whether the provider for this parent clock has been initialized,
2557 * in which case the parent clock will be ready.
2558 */
2559static int parent_ready(struct device_node *np)
2560{
2561 int i = 0;
2562
2563 while (true) {
2564 struct clk *clk = of_clk_get(np, i);
2565
2566 /* this parent is ready, so we can check the next one */
2567 if (!IS_ERR(clk)) {
2568 clk_put(clk);
2569 i++;
2570 continue;
2571 }
2572
2573 /* at least one parent is not ready, so we exit now */
2574 if (PTR_ERR(clk) == -EPROBE_DEFER)
2575 return 0;
2576
2577 /*
2578 * Here we assume that the device tree is written
2579 * correctly. So an error means that there are no
2580 * more parents. As we did not exit earlier, the
2581 * previous parents are all ready. If there are no
2582 * clock parents at all, there is nothing to wait
2583 * for, so we can consider their absence as "ready".
2584 */
2585 return 1;
2586 }
2587}
2588
2530/** 2589/**
2531 * of_clk_init() - Scan and init clock providers from the DT 2590 * of_clk_init() - Scan and init clock providers from the DT
2532 * @matches: array of compatible values and init functions for providers. 2591 * @matches: array of compatible values and init functions for providers.
2533 * 2592 *
2534 * This function scans the device tree for matching clock providers and 2593 * This function scans the device tree for matching clock providers
2535 * calls their initialization functions 2594 * and calls their initialization functions, trying to follow the
2595 * parent-clock dependencies in the process.
2536 */ 2596 */
2537void __init of_clk_init(const struct of_device_id *matches) 2597void __init of_clk_init(const struct of_device_id *matches)
2538{ 2598{
2539 const struct of_device_id *match; 2599 const struct of_device_id *match;
2540 struct device_node *np; 2600 struct device_node *np;
2601 struct clock_provider *clk_provider, *next;
2602 bool is_init_done;
2603 bool force = false;
2541 2604
2542 if (!matches) 2605 if (!matches)
2543 matches = &__clk_of_table; 2606 matches = &__clk_of_table;
2544 2607
2608 /* First prepare the list of the clock providers */
2545 for_each_matching_node_and_match(np, matches, &match) { 2609 for_each_matching_node_and_match(np, matches, &match) {
2546 of_clk_init_cb_t clk_init_cb = match->data; 2610 struct clock_provider *parent =
2547 clk_init_cb(np); 2611 kzalloc(sizeof(struct clock_provider), GFP_KERNEL);
2612
2613 parent->clk_init_cb = match->data;
2614 parent->np = np;
2615 list_add_tail(&parent->node, &clk_provider_list);
2616 }
2617
2618 while (!list_empty(&clk_provider_list)) {
2619 is_init_done = false;
2620 list_for_each_entry_safe(clk_provider, next,
2621 &clk_provider_list, node) {
2622 if (force || parent_ready(clk_provider->np)) {
2623 clk_provider->clk_init_cb(clk_provider->np);
2624 list_del(&clk_provider->node);
2625 kfree(clk_provider);
2626 is_init_done = true;
2627 }
2628 }
2629
2630 /*
2631 * We didn't manage to initialize any of the
2632 * remaining providers during the last loop, so now
2633 * we initialize all the remaining ones unconditionally
2634 * in case their clock parents were not mandatory.
2635 */
2636 if (!is_init_done)
2637 force = true;
2638
2548 } 2639 }
2549} 2640}
2550#endif 2641#endif
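
As a rough illustration of how providers interact with the reworked of_clk_init() (not part of this commit): a provider declared with CLK_OF_DECLARE() is now deferred until of_clk_get() on its DT parents stops returning -EPROBE_DEFER. The "vendor,foo-clock" binding below is hypothetical.

#include <linux/clk-provider.h>
#include <linux/of.h>

static void __init foo_clk_init(struct device_node *np)
{
	/*
	 * By the time this callback runs, parent_ready() has verified
	 * that of_clk_get(np, ...) no longer returns -EPROBE_DEFER for
	 * any "clocks" phandle of this node (unless the fallback pass
	 * forced the call).
	 */

	/* ... register clocks and call of_clk_add_provider() here ... */
}
CLK_OF_DECLARE(foo_clk, "vendor,foo-clock", foo_clk_init);

/* platform code keeps calling of_clk_init(NULL) from its .init_time hook */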
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 48f67218247c..a360b2eca5cb 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -167,6 +167,8 @@ struct clk *clk_get(struct device *dev, const char *con_id)
167 clk = of_clk_get_by_name(dev->of_node, con_id); 167 clk = of_clk_get_by_name(dev->of_node, con_id);
168 if (!IS_ERR(clk)) 168 if (!IS_ERR(clk))
169 return clk; 169 return clk;
170 if (PTR_ERR(clk) == -EPROBE_DEFER)
171 return clk;
170 } 172 }
171 173
172 return clk_get_sys(dev_id, con_id); 174 return clk_get_sys(dev_id, con_id);
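
A minimal consumer-side sketch of what this change implies for drivers (not part of this commit; the device and "bus" con_id are hypothetical): a clk_get() result of -EPROBE_DEFER should now be propagated rather than treated as a missing clock.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* -EPROBE_DEFER retries the probe later */

	return clk_prepare_enable(clk);
}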
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile
index a049108341fc..40b33c6a8257 100644
--- a/drivers/clk/hisilicon/Makefile
+++ b/drivers/clk/hisilicon/Makefile
@@ -2,4 +2,7 @@
2# Hisilicon Clock specific Makefile 2# Hisilicon Clock specific Makefile
3# 3#
4 4
5obj-y += clk.o clkgate-separated.o clk-hi3620.o 5obj-y += clk.o clkgate-separated.o
6
7obj-$(CONFIG_ARCH_HI3xxx) += clk-hi3620.o
8obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
index f24ad6a3a797..339945d2503b 100644
--- a/drivers/clk/hisilicon/clk-hi3620.c
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -210,33 +210,297 @@ static struct hisi_gate_clock hi3620_seperated_gate_clks[] __initdata = {
210 210
211static void __init hi3620_clk_init(struct device_node *np) 211static void __init hi3620_clk_init(struct device_node *np)
212{ 212{
213 void __iomem *base; 213 struct hisi_clock_data *clk_data;
214 214
215 if (np) { 215 clk_data = hisi_clk_init(np, HI3620_NR_CLKS);
216 base = of_iomap(np, 0); 216 if (!clk_data)
217 if (!base) {
218 pr_err("failed to map Hi3620 clock registers\n");
219 return;
220 }
221 } else {
222 pr_err("failed to find Hi3620 clock node in DTS\n");
223 return; 217 return;
224 }
225
226 hisi_clk_init(np, HI3620_NR_CLKS);
227 218
228 hisi_clk_register_fixed_rate(hi3620_fixed_rate_clks, 219 hisi_clk_register_fixed_rate(hi3620_fixed_rate_clks,
229 ARRAY_SIZE(hi3620_fixed_rate_clks), 220 ARRAY_SIZE(hi3620_fixed_rate_clks),
230 base); 221 clk_data);
231 hisi_clk_register_fixed_factor(hi3620_fixed_factor_clks, 222 hisi_clk_register_fixed_factor(hi3620_fixed_factor_clks,
232 ARRAY_SIZE(hi3620_fixed_factor_clks), 223 ARRAY_SIZE(hi3620_fixed_factor_clks),
233 base); 224 clk_data);
234 hisi_clk_register_mux(hi3620_mux_clks, ARRAY_SIZE(hi3620_mux_clks), 225 hisi_clk_register_mux(hi3620_mux_clks, ARRAY_SIZE(hi3620_mux_clks),
235 base); 226 clk_data);
236 hisi_clk_register_divider(hi3620_div_clks, ARRAY_SIZE(hi3620_div_clks), 227 hisi_clk_register_divider(hi3620_div_clks, ARRAY_SIZE(hi3620_div_clks),
237 base); 228 clk_data);
238 hisi_clk_register_gate_sep(hi3620_seperated_gate_clks, 229 hisi_clk_register_gate_sep(hi3620_seperated_gate_clks,
239 ARRAY_SIZE(hi3620_seperated_gate_clks), 230 ARRAY_SIZE(hi3620_seperated_gate_clks),
240 base); 231 clk_data);
241} 232}
242CLK_OF_DECLARE(hi3620_clk, "hisilicon,hi3620-clock", hi3620_clk_init); 233CLK_OF_DECLARE(hi3620_clk, "hisilicon,hi3620-clock", hi3620_clk_init);
234
235struct hisi_mmc_clock {
236 unsigned int id;
237 const char *name;
238 const char *parent_name;
239 unsigned long flags;
240 u32 clken_reg;
241 u32 clken_bit;
242 u32 div_reg;
243 u32 div_off;
244 u32 div_bits;
245 u32 drv_reg;
246 u32 drv_off;
247 u32 drv_bits;
248 u32 sam_reg;
249 u32 sam_off;
250 u32 sam_bits;
251};
252
253struct clk_mmc {
254 struct clk_hw hw;
255 u32 id;
256 void __iomem *clken_reg;
257 u32 clken_bit;
258 void __iomem *div_reg;
259 u32 div_off;
260 u32 div_bits;
261 void __iomem *drv_reg;
262 u32 drv_off;
263 u32 drv_bits;
264 void __iomem *sam_reg;
265 u32 sam_off;
266 u32 sam_bits;
267};
268
269#define to_mmc(_hw) container_of(_hw, struct clk_mmc, hw)
270
271static struct hisi_mmc_clock hi3620_mmc_clks[] __initdata = {
272 { HI3620_SD_CIUCLK, "sd_bclk1", "sd_clk", CLK_SET_RATE_PARENT, 0x1f8, 0, 0x1f8, 1, 3, 0x1f8, 4, 4, 0x1f8, 8, 4},
273 { HI3620_MMC_CIUCLK1, "mmc_bclk1", "mmc_clk1", CLK_SET_RATE_PARENT, 0x1f8, 12, 0x1f8, 13, 3, 0x1f8, 16, 4, 0x1f8, 20, 4},
274 { HI3620_MMC_CIUCLK2, "mmc_bclk2", "mmc_clk2", CLK_SET_RATE_PARENT, 0x1f8, 24, 0x1f8, 25, 3, 0x1f8, 28, 4, 0x1fc, 0, 4},
275 { HI3620_MMC_CIUCLK3, "mmc_bclk3", "mmc_clk3", CLK_SET_RATE_PARENT, 0x1fc, 4, 0x1fc, 5, 3, 0x1fc, 8, 4, 0x1fc, 12, 4},
276};
277
278static unsigned long mmc_clk_recalc_rate(struct clk_hw *hw,
279 unsigned long parent_rate)
280{
281 switch (parent_rate) {
282 case 26000000:
283 return 13000000;
284 case 180000000:
285 return 25000000;
286 case 360000000:
287 return 50000000;
288 case 720000000:
289 return 100000000;
290 case 1440000000:
291 return 180000000;
292 default:
293 return parent_rate;
294 }
295}
296
297static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
298 unsigned long *best_parent_rate,
299 struct clk **best_parent_p)
300{
301 struct clk_mmc *mclk = to_mmc(hw);
302 unsigned long best = 0;
303
304 if ((rate <= 13000000) && (mclk->id == HI3620_MMC_CIUCLK1)) {
305 rate = 13000000;
306 best = 26000000;
307 } else if (rate <= 26000000) {
308 rate = 25000000;
309 best = 180000000;
310 } else if (rate <= 52000000) {
311 rate = 50000000;
312 best = 360000000;
313 } else if (rate <= 100000000) {
314 rate = 100000000;
315 best = 720000000;
316 } else {
317 /* max is 180M */
318 rate = 180000000;
319 best = 1440000000;
320 }
321 *best_parent_rate = best;
322 return rate;
323}
324
325static u32 mmc_clk_delay(u32 val, u32 para, u32 off, u32 len)
326{
327 u32 i;
328
329 for (i = 0; i < len; i++) {
330 if (para % 2)
331 val |= 1 << (off + i);
332 else
333 val &= ~(1 << (off + i));
334 para = para >> 1;
335 }
336
337 return val;
338}
339
340static int mmc_clk_set_timing(struct clk_hw *hw, unsigned long rate)
341{
342 struct clk_mmc *mclk = to_mmc(hw);
343 unsigned long flags;
344 u32 sam, drv, div, val;
345 static DEFINE_SPINLOCK(mmc_clk_lock);
346
347 switch (rate) {
348 case 13000000:
349 sam = 3;
350 drv = 1;
351 div = 1;
352 break;
353 case 25000000:
354 sam = 13;
355 drv = 6;
356 div = 6;
357 break;
358 case 50000000:
359 sam = 3;
360 drv = 6;
361 div = 6;
362 break;
363 case 100000000:
364 sam = 6;
365 drv = 4;
366 div = 6;
367 break;
368 case 180000000:
369 sam = 6;
370 drv = 4;
371 div = 7;
372 break;
373 default:
374 return -EINVAL;
375 }
376
377 spin_lock_irqsave(&mmc_clk_lock, flags);
378
379 val = readl_relaxed(mclk->clken_reg);
380 val &= ~(1 << mclk->clken_bit);
381 writel_relaxed(val, mclk->clken_reg);
382
383 val = readl_relaxed(mclk->sam_reg);
384 val = mmc_clk_delay(val, sam, mclk->sam_off, mclk->sam_bits);
385 writel_relaxed(val, mclk->sam_reg);
386
387 val = readl_relaxed(mclk->drv_reg);
388 val = mmc_clk_delay(val, drv, mclk->drv_off, mclk->drv_bits);
389 writel_relaxed(val, mclk->drv_reg);
390
391 val = readl_relaxed(mclk->div_reg);
392 val = mmc_clk_delay(val, div, mclk->div_off, mclk->div_bits);
393 writel_relaxed(val, mclk->div_reg);
394
395 val = readl_relaxed(mclk->clken_reg);
396 val |= 1 << mclk->clken_bit;
397 writel_relaxed(val, mclk->clken_reg);
398
399 spin_unlock_irqrestore(&mmc_clk_lock, flags);
400
401 return 0;
402}
403
404static int mmc_clk_prepare(struct clk_hw *hw)
405{
406 struct clk_mmc *mclk = to_mmc(hw);
407 unsigned long rate;
408
409 if (mclk->id == HI3620_MMC_CIUCLK1)
410 rate = 13000000;
411 else
412 rate = 25000000;
413
414 return mmc_clk_set_timing(hw, rate);
415}
416
417static int mmc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
418 unsigned long parent_rate)
419{
420 return mmc_clk_set_timing(hw, rate);
421}
422
423static struct clk_ops clk_mmc_ops = {
424 .prepare = mmc_clk_prepare,
425 .determine_rate = mmc_clk_determine_rate,
426 .set_rate = mmc_clk_set_rate,
427 .recalc_rate = mmc_clk_recalc_rate,
428};
429
430static struct clk *hisi_register_clk_mmc(struct hisi_mmc_clock *mmc_clk,
431 void __iomem *base, struct device_node *np)
432{
433 struct clk_mmc *mclk;
434 struct clk *clk;
435 struct clk_init_data init;
436
437 mclk = kzalloc(sizeof(*mclk), GFP_KERNEL);
438 if (!mclk) {
439 pr_err("%s: fail to allocate mmc clk\n", __func__);
440 return ERR_PTR(-ENOMEM);
441 }
442
443 init.name = mmc_clk->name;
444 init.ops = &clk_mmc_ops;
445 init.flags = mmc_clk->flags | CLK_IS_BASIC;
446 init.parent_names = (mmc_clk->parent_name ? &mmc_clk->parent_name : NULL);
447 init.num_parents = (mmc_clk->parent_name ? 1 : 0);
448 mclk->hw.init = &init;
449
450 mclk->id = mmc_clk->id;
451 mclk->clken_reg = base + mmc_clk->clken_reg;
452 mclk->clken_bit = mmc_clk->clken_bit;
453 mclk->div_reg = base + mmc_clk->div_reg;
454 mclk->div_off = mmc_clk->div_off;
455 mclk->div_bits = mmc_clk->div_bits;
456 mclk->drv_reg = base + mmc_clk->drv_reg;
457 mclk->drv_off = mmc_clk->drv_off;
458 mclk->drv_bits = mmc_clk->drv_bits;
459 mclk->sam_reg = base + mmc_clk->sam_reg;
460 mclk->sam_off = mmc_clk->sam_off;
461 mclk->sam_bits = mmc_clk->sam_bits;
462
463 clk = clk_register(NULL, &mclk->hw);
464 if (WARN_ON(IS_ERR(clk)))
465 kfree(mclk);
466 return clk;
467}
468
469static void __init hi3620_mmc_clk_init(struct device_node *node)
470{
471 void __iomem *base;
472 int i, num = ARRAY_SIZE(hi3620_mmc_clks);
473 struct clk_onecell_data *clk_data;
474
475 if (!node) {
476 pr_err("failed to find pctrl node in DTS\n");
477 return;
478 }
479
480 base = of_iomap(node, 0);
481 if (!base) {
482 pr_err("failed to map pctrl\n");
483 return;
484 }
485
486 clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
487 if (WARN_ON(!clk_data))
488 return;
489
490 clk_data->clks = kzalloc(sizeof(struct clk *) * num, GFP_KERNEL);
491 if (!clk_data->clks) {
492 pr_err("%s: fail to allocate mmc clk\n", __func__);
493 return;
494 }
495
496 for (i = 0; i < num; i++) {
497 struct hisi_mmc_clock *mmc_clk = &hi3620_mmc_clks[i];
498 clk_data->clks[mmc_clk->id] =
499 hisi_register_clk_mmc(mmc_clk, base, node);
500 }
501
502 clk_data->clk_num = num;
503 of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
504}
505
506CLK_OF_DECLARE(hi3620_mmc_clk, "hisilicon,hi3620-mmc-clock", hi3620_mmc_clk_init);
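
For context, a minimal consumer-side sketch (not part of this commit) of how the MMC bit clocks above behave: since they carry CLK_SET_RATE_PARENT and map requests through mmc_clk_determine_rate(), a 50 MHz request steers the parent to 360 MHz and then programs the 50 MHz sample/drive/divider timing. The device and "ciu" con_id are hypothetical.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_mmc_probe(struct platform_device *pdev)
{
	struct clk *ciu;
	int ret;

	ciu = devm_clk_get(&pdev->dev, "ciu");
	if (IS_ERR(ciu))
		return PTR_ERR(ciu);

	ret = clk_prepare_enable(ciu);	/* mmc_clk_prepare() applies a safe default */
	if (ret)
		return ret;

	return clk_set_rate(ciu, 50000000);	/* rounds to 50 MHz, parent -> 360 MHz */
}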
diff --git a/drivers/clk/hisilicon/clk-hip04.c b/drivers/clk/hisilicon/clk-hip04.c
new file mode 100644
index 000000000000..132b57a0ce09
--- /dev/null
+++ b/drivers/clk/hisilicon/clk-hip04.c
@@ -0,0 +1,58 @@
1/*
2 * Hisilicon HiP04 clock driver
3 *
4 * Copyright (c) 2013-2014 Hisilicon Limited.
5 * Copyright (c) 2013-2014 Linaro Limited.
6 *
7 * Author: Haojian Zhuang <haojian.zhuang@linaro.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 */
24
25#include <linux/kernel.h>
26#include <linux/clk-provider.h>
27#include <linux/clkdev.h>
28#include <linux/io.h>
29#include <linux/of.h>
30#include <linux/of_address.h>
31#include <linux/of_device.h>
32#include <linux/slab.h>
33#include <linux/clk.h>
34
35#include <dt-bindings/clock/hip04-clock.h>
36
37#include "clk.h"
38
39/* fixed rate clocks */
40static struct hisi_fixed_rate_clock hip04_fixed_rate_clks[] __initdata = {
41 { HIP04_OSC50M, "osc50m", NULL, CLK_IS_ROOT, 50000000, },
42 { HIP04_CLK_50M, "clk50m", NULL, CLK_IS_ROOT, 50000000, },
43 { HIP04_CLK_168M, "clk168m", NULL, CLK_IS_ROOT, 168750000, },
44};
45
46static void __init hip04_clk_init(struct device_node *np)
47{
48 struct hisi_clock_data *clk_data;
49
50 clk_data = hisi_clk_init(np, HIP04_NR_CLKS);
51 if (!clk_data)
52 return;
53
54 hisi_clk_register_fixed_rate(hip04_fixed_rate_clks,
55 ARRAY_SIZE(hip04_fixed_rate_clks),
56 clk_data);
57}
58CLK_OF_DECLARE(hip04_clk, "hisilicon,hip04-clock", hip04_clk_init);
diff --git a/drivers/clk/hisilicon/clk.c b/drivers/clk/hisilicon/clk.c
index a3a7152c92d9..276f672e7b1a 100644
--- a/drivers/clk/hisilicon/clk.c
+++ b/drivers/clk/hisilicon/clk.c
@@ -37,23 +37,49 @@
37#include "clk.h" 37#include "clk.h"
38 38
39static DEFINE_SPINLOCK(hisi_clk_lock); 39static DEFINE_SPINLOCK(hisi_clk_lock);
40static struct clk **clk_table;
41static struct clk_onecell_data clk_data;
42 40
43void __init hisi_clk_init(struct device_node *np, int nr_clks) 41struct hisi_clock_data __init *hisi_clk_init(struct device_node *np,
42 int nr_clks)
44{ 43{
44 struct hisi_clock_data *clk_data;
45 struct clk **clk_table;
46 void __iomem *base;
47
48 if (np) {
49 base = of_iomap(np, 0);
50 if (!base) {
51 pr_err("failed to map Hisilicon clock registers\n");
52 goto err;
53 }
54 } else {
55 pr_err("failed to find Hisilicon clock node in DTS\n");
56 goto err;
57 }
58
59 clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
60 if (!clk_data) {
61 pr_err("%s: could not allocate clock data\n", __func__);
62 goto err;
63 }
64 clk_data->base = base;
65
45 clk_table = kzalloc(sizeof(struct clk *) * nr_clks, GFP_KERNEL); 66 clk_table = kzalloc(sizeof(struct clk *) * nr_clks, GFP_KERNEL);
46 if (!clk_table) { 67 if (!clk_table) {
47 pr_err("%s: could not allocate clock lookup table\n", __func__); 68 pr_err("%s: could not allocate clock lookup table\n", __func__);
48 return; 69 goto err_data;
49 } 70 }
50 clk_data.clks = clk_table; 71 clk_data->clk_data.clks = clk_table;
51 clk_data.clk_num = nr_clks; 72 clk_data->clk_data.clk_num = nr_clks;
52 of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); 73 of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data->clk_data);
74 return clk_data;
75err_data:
76 kfree(clk_data);
77err:
78 return NULL;
53} 79}
54 80
55void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *clks, 81void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *clks,
56 int nums, void __iomem *base) 82 int nums, struct hisi_clock_data *data)
57{ 83{
58 struct clk *clk; 84 struct clk *clk;
59 int i; 85 int i;
@@ -68,11 +94,13 @@ void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *clks,
68 __func__, clks[i].name); 94 __func__, clks[i].name);
69 continue; 95 continue;
70 } 96 }
97 data->clk_data.clks[clks[i].id] = clk;
71 } 98 }
72} 99}
73 100
74void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *clks, 101void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *clks,
75 int nums, void __iomem *base) 102 int nums,
103 struct hisi_clock_data *data)
76{ 104{
77 struct clk *clk; 105 struct clk *clk;
78 int i; 106 int i;
@@ -87,13 +115,15 @@ void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *clks,
87 __func__, clks[i].name); 115 __func__, clks[i].name);
88 continue; 116 continue;
89 } 117 }
118 data->clk_data.clks[clks[i].id] = clk;
90 } 119 }
91} 120}
92 121
93void __init hisi_clk_register_mux(struct hisi_mux_clock *clks, 122void __init hisi_clk_register_mux(struct hisi_mux_clock *clks,
94 int nums, void __iomem *base) 123 int nums, struct hisi_clock_data *data)
95{ 124{
96 struct clk *clk; 125 struct clk *clk;
126 void __iomem *base = data->base;
97 int i; 127 int i;
98 128
99 for (i = 0; i < nums; i++) { 129 for (i = 0; i < nums; i++) {
@@ -111,14 +141,15 @@ void __init hisi_clk_register_mux(struct hisi_mux_clock *clks,
111 if (clks[i].alias) 141 if (clks[i].alias)
112 clk_register_clkdev(clk, clks[i].alias, NULL); 142 clk_register_clkdev(clk, clks[i].alias, NULL);
113 143
114 clk_table[clks[i].id] = clk; 144 data->clk_data.clks[clks[i].id] = clk;
115 } 145 }
116} 146}
117 147
118void __init hisi_clk_register_divider(struct hisi_divider_clock *clks, 148void __init hisi_clk_register_divider(struct hisi_divider_clock *clks,
119 int nums, void __iomem *base) 149 int nums, struct hisi_clock_data *data)
120{ 150{
121 struct clk *clk; 151 struct clk *clk;
152 void __iomem *base = data->base;
122 int i; 153 int i;
123 154
124 for (i = 0; i < nums; i++) { 155 for (i = 0; i < nums; i++) {
@@ -139,14 +170,15 @@ void __init hisi_clk_register_divider(struct hisi_divider_clock *clks,
139 if (clks[i].alias) 170 if (clks[i].alias)
140 clk_register_clkdev(clk, clks[i].alias, NULL); 171 clk_register_clkdev(clk, clks[i].alias, NULL);
141 172
142 clk_table[clks[i].id] = clk; 173 data->clk_data.clks[clks[i].id] = clk;
143 } 174 }
144} 175}
145 176
146void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *clks, 177void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *clks,
147 int nums, void __iomem *base) 178 int nums, struct hisi_clock_data *data)
148{ 179{
149 struct clk *clk; 180 struct clk *clk;
181 void __iomem *base = data->base;
150 int i; 182 int i;
151 183
152 for (i = 0; i < nums; i++) { 184 for (i = 0; i < nums; i++) {
@@ -166,6 +198,6 @@ void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *clks,
166 if (clks[i].alias) 198 if (clks[i].alias)
167 clk_register_clkdev(clk, clks[i].alias, NULL); 199 clk_register_clkdev(clk, clks[i].alias, NULL);
168 200
169 clk_table[clks[i].id] = clk; 201 data->clk_data.clks[clks[i].id] = clk;
170 } 202 }
171} 203}
diff --git a/drivers/clk/hisilicon/clk.h b/drivers/clk/hisilicon/clk.h
index 4a6beebefb7a..43fa5da88f02 100644
--- a/drivers/clk/hisilicon/clk.h
+++ b/drivers/clk/hisilicon/clk.h
@@ -30,6 +30,11 @@
30#include <linux/io.h> 30#include <linux/io.h>
31#include <linux/spinlock.h> 31#include <linux/spinlock.h>
32 32
33struct hisi_clock_data {
34 struct clk_onecell_data clk_data;
35 void __iomem *base;
36};
37
33struct hisi_fixed_rate_clock { 38struct hisi_fixed_rate_clock {
34 unsigned int id; 39 unsigned int id;
35 char *name; 40 char *name;
@@ -89,15 +94,15 @@ struct clk *hisi_register_clkgate_sep(struct device *, const char *,
89 void __iomem *, u8, 94 void __iomem *, u8,
90 u8, spinlock_t *); 95 u8, spinlock_t *);
91 96
92void __init hisi_clk_init(struct device_node *, int); 97struct hisi_clock_data __init *hisi_clk_init(struct device_node *, int);
93void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *, 98void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *,
94 int, void __iomem *); 99 int, struct hisi_clock_data *);
95void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *, 100void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *,
96 int, void __iomem *); 101 int, struct hisi_clock_data *);
97void __init hisi_clk_register_mux(struct hisi_mux_clock *, int, 102void __init hisi_clk_register_mux(struct hisi_mux_clock *, int,
98 void __iomem *); 103 struct hisi_clock_data *);
99void __init hisi_clk_register_divider(struct hisi_divider_clock *, 104void __init hisi_clk_register_divider(struct hisi_divider_clock *,
100 int, void __iomem *); 105 int, struct hisi_clock_data *);
101void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *, 106void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *,
102 int, void __iomem *); 107 int, struct hisi_clock_data *);
103#endif /* __HISI_CLK_H */ 108#endif /* __HISI_CLK_H */
diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c
index 80c1dd15d15c..23a56f561812 100644
--- a/drivers/clk/mmp/clk-frac.c
+++ b/drivers/clk/mmp/clk-frac.c
@@ -40,15 +40,19 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
40 40
41 for (i = 0; i < factor->ftbl_cnt; i++) { 41 for (i = 0; i < factor->ftbl_cnt; i++) {
42 prev_rate = rate; 42 prev_rate = rate;
43 rate = (((*prate / 10000) * factor->ftbl[i].num) / 43 rate = (((*prate / 10000) * factor->ftbl[i].den) /
44 (factor->ftbl[i].den * factor->masks->factor)) * 10000; 44 (factor->ftbl[i].num * factor->masks->factor)) * 10000;
45 if (rate > drate) 45 if (rate > drate)
46 break; 46 break;
47 } 47 }
48 if (i == 0) 48 if ((i == 0) || (i == factor->ftbl_cnt)) {
49 return rate; 49 return rate;
50 else 50 } else {
51 return prev_rate; 51 if ((drate - prev_rate) > (rate - drate))
52 return rate;
53 else
54 return prev_rate;
55 }
52} 56}
53 57
54static unsigned long clk_factor_recalc_rate(struct clk_hw *hw, 58static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
@@ -64,7 +68,7 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
64 num = (val >> masks->num_shift) & masks->num_mask; 68 num = (val >> masks->num_shift) & masks->num_mask;
65 69
66 /* calculate denominator */ 70 /* calculate denominator */
67 den = (val >> masks->den_shift) & masks->num_mask; 71 den = (val >> masks->den_shift) & masks->den_mask;
68 72
69 if (!den) 73 if (!den)
70 return 0; 74 return 0;
@@ -85,8 +89,8 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
85 89
86 for (i = 0; i < factor->ftbl_cnt; i++) { 90 for (i = 0; i < factor->ftbl_cnt; i++) {
87 prev_rate = rate; 91 prev_rate = rate;
88 rate = (((prate / 10000) * factor->ftbl[i].num) / 92 rate = (((prate / 10000) * factor->ftbl[i].den) /
89 (factor->ftbl[i].den * factor->masks->factor)) * 10000; 93 (factor->ftbl[i].num * factor->masks->factor)) * 10000;
90 if (rate > drate) 94 if (rate > drate)
91 break; 95 break;
92 } 96 }
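
A standalone illustration of the corrected rounding above (not part of this commit): each table entry yields rate = (prate / 10000) * den / (num * factor) * 10000, and the candidate closest to the requested rate wins. The table entries and factor below are made up for the example.

#include <stdio.h>

struct ftbl_entry { unsigned long num, den; };

static unsigned long frac_round_rate(unsigned long drate, unsigned long prate,
				     const struct ftbl_entry *tbl, int cnt,
				     unsigned long factor)
{
	unsigned long rate = 0, prev_rate = 0;
	int i;

	for (i = 0; i < cnt; i++) {
		prev_rate = rate;
		rate = ((prate / 10000) * tbl[i].den) /
		       (tbl[i].num * factor) * 10000;
		if (rate > drate)
			break;
	}
	if (i == 0 || i == cnt)
		return rate;

	/* pick whichever candidate is closer to the requested rate */
	return (drate - prev_rate) > (rate - drate) ? rate : prev_rate;
}

int main(void)
{
	static const struct ftbl_entry tbl[] = {	/* hypothetical */
		{ 8, 1 }, { 4, 1 }, { 2, 1 }, { 1, 1 },
	};

	/* 312 MHz parent, factor 2: candidates are 19.5, 39, 78 and 156 MHz */
	printf("%lu\n", frac_round_rate(50000000, 312000000, tbl, 4, 2));
	return 0;
}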
diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig
index c339b829d3e3..693f7be129f1 100644
--- a/drivers/clk/mvebu/Kconfig
+++ b/drivers/clk/mvebu/Kconfig
@@ -13,6 +13,14 @@ config ARMADA_370_CLK
13 select MVEBU_CLK_CPU 13 select MVEBU_CLK_CPU
14 select MVEBU_CLK_COREDIV 14 select MVEBU_CLK_COREDIV
15 15
16config ARMADA_375_CLK
17 bool
18 select MVEBU_CLK_COMMON
19
20config ARMADA_38X_CLK
21 bool
22 select MVEBU_CLK_COMMON
23
16config ARMADA_XP_CLK 24config ARMADA_XP_CLK
17 bool 25 bool
18 select MVEBU_CLK_COMMON 26 select MVEBU_CLK_COMMON
diff --git a/drivers/clk/mvebu/Makefile b/drivers/clk/mvebu/Makefile
index 21bbfb4a9f42..4c66162fb0b4 100644
--- a/drivers/clk/mvebu/Makefile
+++ b/drivers/clk/mvebu/Makefile
@@ -3,6 +3,8 @@ obj-$(CONFIG_MVEBU_CLK_CPU) += clk-cpu.o
3obj-$(CONFIG_MVEBU_CLK_COREDIV) += clk-corediv.o 3obj-$(CONFIG_MVEBU_CLK_COREDIV) += clk-corediv.o
4 4
5obj-$(CONFIG_ARMADA_370_CLK) += armada-370.o 5obj-$(CONFIG_ARMADA_370_CLK) += armada-370.o
6obj-$(CONFIG_ARMADA_375_CLK) += armada-375.o
7obj-$(CONFIG_ARMADA_38X_CLK) += armada-38x.o
6obj-$(CONFIG_ARMADA_XP_CLK) += armada-xp.o 8obj-$(CONFIG_ARMADA_XP_CLK) += armada-xp.o
7obj-$(CONFIG_DOVE_CLK) += dove.o 9obj-$(CONFIG_DOVE_CLK) += dove.o
8obj-$(CONFIG_KIRKWOOD_CLK) += kirkwood.o 10obj-$(CONFIG_KIRKWOOD_CLK) += kirkwood.o
diff --git a/drivers/clk/mvebu/armada-375.c b/drivers/clk/mvebu/armada-375.c
new file mode 100644
index 000000000000..c991a4d95e10
--- /dev/null
+++ b/drivers/clk/mvebu/armada-375.c
@@ -0,0 +1,184 @@
1/*
2 * Marvell Armada 375 SoC clocks
3 *
4 * Copyright (C) 2014 Marvell
5 *
6 * Gregory CLEMENT <gregory.clement@free-electrons.com>
7 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
8 * Andrew Lunn <andrew@lunn.ch>
9 *
10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
13 */
14
15#include <linux/kernel.h>
16#include <linux/clk-provider.h>
17#include <linux/io.h>
18#include <linux/of.h>
19#include "common.h"
20
21/*
22 * Core Clocks
23 */
24
25/*
26 * For the Armada 375 SoCs, the CPU, DDR and L2 clock frequencies are
27 * all modified at the same time, and not separately as for the Armada
28 * 370 or the Armada XP SoCs.
29 *
30 * SAR0[21:17] : CPU frequency DDR frequency L2 frequency
31 * 6 = 400 MHz 400 MHz 200 MHz
32 * 15 = 600 MHz 600 MHz 300 MHz
33 * 21 = 800 MHz 534 MHz 400 MHz
34 * 25 = 1000 MHz 500 MHz 500 MHz
35 * others reserved.
36 *
37 * SAR0[22] : TCLK frequency
38 * 0 = 166 MHz
39 * 1 = 200 MHz
40 */
41
42#define SAR1_A375_TCLK_FREQ_OPT 22
43#define SAR1_A375_TCLK_FREQ_OPT_MASK 0x1
44#define SAR1_A375_CPU_DDR_L2_FREQ_OPT 17
45#define SAR1_A375_CPU_DDR_L2_FREQ_OPT_MASK 0x1F
46
47static const u32 armada_375_tclk_frequencies[] __initconst = {
48 166000000,
49 200000000,
50};
51
52static u32 __init armada_375_get_tclk_freq(void __iomem *sar)
53{
54 u8 tclk_freq_select;
55
56 tclk_freq_select = ((readl(sar) >> SAR1_A375_TCLK_FREQ_OPT) &
57 SAR1_A375_TCLK_FREQ_OPT_MASK);
58 return armada_375_tclk_frequencies[tclk_freq_select];
59}
60
61
62static const u32 armada_375_cpu_frequencies[] __initconst = {
63 0, 0, 0, 0, 0, 0,
64 400000000,
65 0, 0, 0, 0, 0, 0, 0, 0,
66 600000000,
67 0, 0, 0, 0, 0,
68 800000000,
69 0, 0, 0,
70 1000000000,
71};
72
73static u32 __init armada_375_get_cpu_freq(void __iomem *sar)
74{
75 u8 cpu_freq_select;
76
77 cpu_freq_select = ((readl(sar) >> SAR1_A375_CPU_DDR_L2_FREQ_OPT) &
78 SAR1_A375_CPU_DDR_L2_FREQ_OPT_MASK);
79 if (cpu_freq_select >= ARRAY_SIZE(armada_375_cpu_frequencies)) {
80 pr_err("Selected CPU frequency (%d) unsupported\n",
81 cpu_freq_select);
82 return 0;
83 } else
84 return armada_375_cpu_frequencies[cpu_freq_select];
85}
86
87enum { A375_CPU_TO_DDR, A375_CPU_TO_L2 };
88
89static const struct coreclk_ratio armada_375_coreclk_ratios[] __initconst = {
90 { .id = A375_CPU_TO_L2, .name = "l2clk" },
91 { .id = A375_CPU_TO_DDR, .name = "ddrclk" },
92};
93
94static const int armada_375_cpu_l2_ratios[32][2] __initconst = {
95 {0, 1}, {0, 1}, {0, 1}, {0, 1},
96 {0, 1}, {0, 1}, {1, 2}, {0, 1},
97 {0, 1}, {0, 1}, {0, 1}, {0, 1},
98 {0, 1}, {0, 1}, {0, 1}, {1, 2},
99 {0, 1}, {0, 1}, {0, 1}, {0, 1},
100 {0, 1}, {1, 2}, {0, 1}, {0, 1},
101 {0, 1}, {1, 2}, {0, 1}, {0, 1},
102 {0, 1}, {0, 1}, {0, 1}, {0, 1},
103};
104
105static const int armada_375_cpu_ddr_ratios[32][2] __initconst = {
106 {0, 1}, {0, 1}, {0, 1}, {0, 1},
107 {0, 1}, {0, 1}, {1, 1}, {0, 1},
108 {0, 1}, {0, 1}, {0, 1}, {0, 1},
109 {0, 1}, {0, 1}, {0, 1}, {2, 3},
110 {0, 1}, {0, 1}, {0, 1}, {0, 1},
111 {0, 1}, {2, 3}, {0, 1}, {0, 1},
112 {0, 1}, {1, 2}, {0, 1}, {0, 1},
113 {0, 1}, {0, 1}, {0, 1}, {0, 1},
114};
115
116static void __init armada_375_get_clk_ratio(
117 void __iomem *sar, int id, int *mult, int *div)
118{
119 u32 opt = ((readl(sar) >> SAR1_A375_CPU_DDR_L2_FREQ_OPT) &
120 SAR1_A375_CPU_DDR_L2_FREQ_OPT_MASK);
121
122 switch (id) {
123 case A375_CPU_TO_L2:
124 *mult = armada_375_cpu_l2_ratios[opt][0];
125 *div = armada_375_cpu_l2_ratios[opt][1];
126 break;
127 case A375_CPU_TO_DDR:
128 *mult = armada_375_cpu_ddr_ratios[opt][0];
129 *div = armada_375_cpu_ddr_ratios[opt][1];
130 break;
131 }
132}
133
134static const struct coreclk_soc_desc armada_375_coreclks = {
135 .get_tclk_freq = armada_375_get_tclk_freq,
136 .get_cpu_freq = armada_375_get_cpu_freq,
137 .get_clk_ratio = armada_375_get_clk_ratio,
138 .ratios = armada_375_coreclk_ratios,
139 .num_ratios = ARRAY_SIZE(armada_375_coreclk_ratios),
140};
141
142static void __init armada_375_coreclk_init(struct device_node *np)
143{
144 mvebu_coreclk_setup(np, &armada_375_coreclks);
145}
146CLK_OF_DECLARE(armada_375_core_clk, "marvell,armada-375-core-clock",
147 armada_375_coreclk_init);
148
149/*
150 * Clock Gating Control
151 */
152static const struct clk_gating_soc_desc armada_375_gating_desc[] __initconst = {
153 { "mu", NULL, 2 },
154 { "pp", NULL, 3 },
155 { "ptp", NULL, 4 },
156 { "pex0", NULL, 5 },
157 { "pex1", NULL, 6 },
158 { "audio", NULL, 8 },
159 { "nd_clk", "nand", 11 },
160 { "sata0_link", "sata0_core", 14 },
161 { "sata0_core", NULL, 15 },
162 { "usb3", NULL, 16 },
163 { "sdio", NULL, 17 },
164 { "usb", NULL, 18 },
165 { "gop", NULL, 19 },
166 { "sata1_link", "sata1_core", 20 },
167 { "sata1_core", NULL, 21 },
168 { "xor0", NULL, 22 },
169 { "xor1", NULL, 23 },
170 { "copro", NULL, 24 },
171 { "tdm", NULL, 25 },
172 { "crypto0_enc", NULL, 28 },
173 { "crypto0_core", NULL, 29 },
174 { "crypto1_enc", NULL, 30 },
175 { "crypto1_core", NULL, 31 },
176 { }
177};
178
179static void __init armada_375_clk_gating_init(struct device_node *np)
180{
181 mvebu_clk_gating_setup(np, armada_375_gating_desc);
182}
183CLK_OF_DECLARE(armada_375_clk_gating, "marvell,armada-375-gating-clock",
184 armada_375_clk_gating_init);
diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c
new file mode 100644
index 000000000000..8bccf4ecdab6
--- /dev/null
+++ b/drivers/clk/mvebu/armada-38x.c
@@ -0,0 +1,167 @@
1/*
2 * Marvell Armada 380/385 SoC clocks
3 *
4 * Copyright (C) 2014 Marvell
5 *
6 * Gregory CLEMENT <gregory.clement@free-electrons.com>
7 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
8 * Andrew Lunn <andrew@lunn.ch>
9 *
10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
13 */
14
15#include <linux/kernel.h>
16#include <linux/clk-provider.h>
17#include <linux/io.h>
18#include <linux/of.h>
19#include "common.h"
20
21/*
22 * SAR[14:10] : Ratios between PCLK0, NBCLK, HCLK and DRAM clocks
23 *
24 * SAR[15] : TCLK frequency
25 * 0 = 250 MHz
26 * 1 = 200 MHz
27 */
28
29#define SAR_A380_TCLK_FREQ_OPT 15
30#define SAR_A380_TCLK_FREQ_OPT_MASK 0x1
31#define SAR_A380_CPU_DDR_L2_FREQ_OPT 10
32#define SAR_A380_CPU_DDR_L2_FREQ_OPT_MASK 0x1F
33
34static const u32 armada_38x_tclk_frequencies[] __initconst = {
35 250000000,
36 200000000,
37};
38
39static u32 __init armada_38x_get_tclk_freq(void __iomem *sar)
40{
41 u8 tclk_freq_select;
42
43 tclk_freq_select = ((readl(sar) >> SAR_A380_TCLK_FREQ_OPT) &
44 SAR_A380_TCLK_FREQ_OPT_MASK);
45 return armada_38x_tclk_frequencies[tclk_freq_select];
46}
47
48static const u32 armada_38x_cpu_frequencies[] __initconst = {
49 0, 0, 0, 0,
50 1066 * 1000 * 1000, 0, 0, 0,
51 1332 * 1000 * 1000, 0, 0, 0,
52 1600 * 1000 * 1000,
53};
54
55static u32 __init armada_38x_get_cpu_freq(void __iomem *sar)
56{
57 u8 cpu_freq_select;
58
59 cpu_freq_select = ((readl(sar) >> SAR_A380_CPU_DDR_L2_FREQ_OPT) &
60 SAR_A380_CPU_DDR_L2_FREQ_OPT_MASK);
61 if (cpu_freq_select >= ARRAY_SIZE(armada_38x_cpu_frequencies)) {
62 pr_err("Selected CPU frequency (%d) unsupported\n",
63 cpu_freq_select);
64 return 0;
65 }
66
67 return armada_38x_cpu_frequencies[cpu_freq_select];
68}
69
70enum { A380_CPU_TO_DDR, A380_CPU_TO_L2 };
71
72static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = {
73 { .id = A380_CPU_TO_L2, .name = "l2clk" },
74 { .id = A380_CPU_TO_DDR, .name = "ddrclk" },
75};
76
77static const int armada_38x_cpu_l2_ratios[32][2] __initconst = {
78 {0, 1}, {0, 1}, {0, 1}, {0, 1},
79 {1, 2}, {0, 1}, {0, 1}, {0, 1},
80 {1, 2}, {0, 1}, {0, 1}, {0, 1},
81 {1, 2}, {0, 1}, {0, 1}, {0, 1},
82 {0, 1}, {0, 1}, {0, 1}, {0, 1},
83 {0, 1}, {0, 1}, {0, 1}, {0, 1},
84 {0, 1}, {0, 1}, {0, 1}, {0, 1},
85 {0, 1}, {0, 1}, {0, 1}, {0, 1},
86};
87
88static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = {
89 {0, 1}, {0, 1}, {0, 1}, {0, 1},
90 {1, 2}, {0, 1}, {0, 1}, {0, 1},
91 {1, 2}, {0, 1}, {0, 1}, {0, 1},
92 {1, 2}, {0, 1}, {0, 1}, {0, 1},
93 {0, 1}, {0, 1}, {0, 1}, {0, 1},
94 {0, 1}, {0, 1}, {0, 1}, {0, 1},
95 {0, 1}, {0, 1}, {0, 1}, {0, 1},
96 {0, 1}, {0, 1}, {0, 1}, {0, 1},
97};
98
99static void __init armada_38x_get_clk_ratio(
100 void __iomem *sar, int id, int *mult, int *div)
101{
102 u32 opt = ((readl(sar) >> SAR_A380_CPU_DDR_L2_FREQ_OPT) &
103 SAR_A380_CPU_DDR_L2_FREQ_OPT_MASK);
104
105 switch (id) {
106 case A380_CPU_TO_L2:
107 *mult = armada_38x_cpu_l2_ratios[opt][0];
108 *div = armada_38x_cpu_l2_ratios[opt][1];
109 break;
110 case A380_CPU_TO_DDR:
111 *mult = armada_38x_cpu_ddr_ratios[opt][0];
112 *div = armada_38x_cpu_ddr_ratios[opt][1];
113 break;
114 }
115}
116
117static const struct coreclk_soc_desc armada_38x_coreclks = {
118 .get_tclk_freq = armada_38x_get_tclk_freq,
119 .get_cpu_freq = armada_38x_get_cpu_freq,
120 .get_clk_ratio = armada_38x_get_clk_ratio,
121 .ratios = armada_38x_coreclk_ratios,
122 .num_ratios = ARRAY_SIZE(armada_38x_coreclk_ratios),
123};
124
125static void __init armada_38x_coreclk_init(struct device_node *np)
126{
127 mvebu_coreclk_setup(np, &armada_38x_coreclks);
128}
129CLK_OF_DECLARE(armada_38x_core_clk, "marvell,armada-380-core-clock",
130 armada_38x_coreclk_init);
131
132/*
133 * Clock Gating Control
134 */
135static const struct clk_gating_soc_desc armada_38x_gating_desc[] __initconst = {
136 { "audio", NULL, 0 },
137 { "ge2", NULL, 2 },
138 { "ge1", NULL, 3 },
139 { "ge0", NULL, 4 },
140 { "pex1", NULL, 5 },
141 { "pex2", NULL, 6 },
142 { "pex3", NULL, 7 },
143 { "pex0", NULL, 8 },
144 { "usb3h0", NULL, 9 },
145 { "usb3h1", NULL, 10 },
146 { "usb3d", NULL, 11 },
147 { "bm", NULL, 13 },
148 { "crypto0z", NULL, 14 },
149 { "sata0", NULL, 15 },
150 { "crypto1z", NULL, 16 },
151 { "sdio", NULL, 17 },
152 { "usb2", NULL, 18 },
153 { "crypto1", NULL, 21 },
154 { "xor0", NULL, 22 },
155 { "crypto0", NULL, 23 },
156 { "tdm", NULL, 25 },
157 { "xor1", NULL, 28 },
158 { "sata1", NULL, 30 },
159 { }
160};
161
162static void __init armada_38x_clk_gating_init(struct device_node *np)
163{
164 mvebu_clk_gating_setup(np, armada_38x_gating_desc);
165}
166CLK_OF_DECLARE(armada_38x_clk_gating, "marvell,armada-380-gating-clock",
167 armada_38x_clk_gating_init);
diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c
index 7162615bcdcd..d1e5863d3375 100644
--- a/drivers/clk/mvebu/clk-corediv.c
+++ b/drivers/clk/mvebu/clk-corediv.c
@@ -18,26 +18,56 @@
18#include "common.h" 18#include "common.h"
19 19
20#define CORE_CLK_DIV_RATIO_MASK 0xff 20#define CORE_CLK_DIV_RATIO_MASK 0xff
21#define CORE_CLK_DIV_RATIO_RELOAD BIT(8)
22#define CORE_CLK_DIV_ENABLE_OFFSET 24
23#define CORE_CLK_DIV_RATIO_OFFSET 0x8
24 21
22/*
23 * This structure describes the hardware details (bit offset and mask)
24 * to configure one particular core divider clock. Those hardware
25 * details may differ from one SoC to another. This structure is
26 * therefore typically instantiated statically to describe the
27 * hardware details.
28 */
25struct clk_corediv_desc { 29struct clk_corediv_desc {
26 unsigned int mask; 30 unsigned int mask;
27 unsigned int offset; 31 unsigned int offset;
28 unsigned int fieldbit; 32 unsigned int fieldbit;
29}; 33};
30 34
35/*
36 * This structure describes the hardware details to configure the core
37 * divider clocks on a given SoC. Amongst others, it points to the
38 * array of core divider clock descriptors for this SoC, as well as
39 * the corresponding operations to manipulate them.
40 */
41struct clk_corediv_soc_desc {
42 const struct clk_corediv_desc *descs;
43 unsigned int ndescs;
44 const struct clk_ops ops;
45 u32 ratio_reload;
46 u32 enable_bit_offset;
47 u32 ratio_offset;
48};
49
50/*
51 * This structure represents one core divider clock for the clock
52 * framework, and is dynamically allocated for each core divider clock
53 * existing in the current SoC.
54 */
31struct clk_corediv { 55struct clk_corediv {
32 struct clk_hw hw; 56 struct clk_hw hw;
33 void __iomem *reg; 57 void __iomem *reg;
34 struct clk_corediv_desc desc; 58 const struct clk_corediv_desc *desc;
59 const struct clk_corediv_soc_desc *soc_desc;
35 spinlock_t lock; 60 spinlock_t lock;
36}; 61};
37 62
38static struct clk_onecell_data clk_data; 63static struct clk_onecell_data clk_data;
39 64
40static const struct clk_corediv_desc mvebu_corediv_desc[] __initconst = { 65/*
66 * Description of the core divider clocks available. For now, we
67 * support only NAND, and it is available at the same register
68 * locations regardless of the SoC.
69 */
70static const struct clk_corediv_desc mvebu_corediv_desc[] = {
41 { .mask = 0x3f, .offset = 8, .fieldbit = 1 }, /* NAND clock */ 71 { .mask = 0x3f, .offset = 8, .fieldbit = 1 }, /* NAND clock */
42}; 72};
43 73
@@ -46,8 +76,9 @@ static const struct clk_corediv_desc mvebu_corediv_desc[] __initconst = {
46static int clk_corediv_is_enabled(struct clk_hw *hwclk) 76static int clk_corediv_is_enabled(struct clk_hw *hwclk)
47{ 77{
48 struct clk_corediv *corediv = to_corediv_clk(hwclk); 78 struct clk_corediv *corediv = to_corediv_clk(hwclk);
49 struct clk_corediv_desc *desc = &corediv->desc; 79 const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
50 u32 enable_mask = BIT(desc->fieldbit) << CORE_CLK_DIV_ENABLE_OFFSET; 80 const struct clk_corediv_desc *desc = corediv->desc;
81 u32 enable_mask = BIT(desc->fieldbit) << soc_desc->enable_bit_offset;
51 82
52 return !!(readl(corediv->reg) & enable_mask); 83 return !!(readl(corediv->reg) & enable_mask);
53} 84}
@@ -55,14 +86,15 @@ static int clk_corediv_is_enabled(struct clk_hw *hwclk)
55static int clk_corediv_enable(struct clk_hw *hwclk) 86static int clk_corediv_enable(struct clk_hw *hwclk)
56{ 87{
57 struct clk_corediv *corediv = to_corediv_clk(hwclk); 88 struct clk_corediv *corediv = to_corediv_clk(hwclk);
58 struct clk_corediv_desc *desc = &corediv->desc; 89 const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
90 const struct clk_corediv_desc *desc = corediv->desc;
59 unsigned long flags = 0; 91 unsigned long flags = 0;
60 u32 reg; 92 u32 reg;
61 93
62 spin_lock_irqsave(&corediv->lock, flags); 94 spin_lock_irqsave(&corediv->lock, flags);
63 95
64 reg = readl(corediv->reg); 96 reg = readl(corediv->reg);
65 reg |= (BIT(desc->fieldbit) << CORE_CLK_DIV_ENABLE_OFFSET); 97 reg |= (BIT(desc->fieldbit) << soc_desc->enable_bit_offset);
66 writel(reg, corediv->reg); 98 writel(reg, corediv->reg);
67 99
68 spin_unlock_irqrestore(&corediv->lock, flags); 100 spin_unlock_irqrestore(&corediv->lock, flags);
@@ -73,14 +105,15 @@ static int clk_corediv_enable(struct clk_hw *hwclk)
73static void clk_corediv_disable(struct clk_hw *hwclk) 105static void clk_corediv_disable(struct clk_hw *hwclk)
74{ 106{
75 struct clk_corediv *corediv = to_corediv_clk(hwclk); 107 struct clk_corediv *corediv = to_corediv_clk(hwclk);
76 struct clk_corediv_desc *desc = &corediv->desc; 108 const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
109 const struct clk_corediv_desc *desc = corediv->desc;
77 unsigned long flags = 0; 110 unsigned long flags = 0;
78 u32 reg; 111 u32 reg;
79 112
80 spin_lock_irqsave(&corediv->lock, flags); 113 spin_lock_irqsave(&corediv->lock, flags);
81 114
82 reg = readl(corediv->reg); 115 reg = readl(corediv->reg);
83 reg &= ~(BIT(desc->fieldbit) << CORE_CLK_DIV_ENABLE_OFFSET); 116 reg &= ~(BIT(desc->fieldbit) << soc_desc->enable_bit_offset);
84 writel(reg, corediv->reg); 117 writel(reg, corediv->reg);
85 118
86 spin_unlock_irqrestore(&corediv->lock, flags); 119 spin_unlock_irqrestore(&corediv->lock, flags);
@@ -90,10 +123,11 @@ static unsigned long clk_corediv_recalc_rate(struct clk_hw *hwclk,
90 unsigned long parent_rate) 123 unsigned long parent_rate)
91{ 124{
92 struct clk_corediv *corediv = to_corediv_clk(hwclk); 125 struct clk_corediv *corediv = to_corediv_clk(hwclk);
93 struct clk_corediv_desc *desc = &corediv->desc; 126 const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
127 const struct clk_corediv_desc *desc = corediv->desc;
94 u32 reg, div; 128 u32 reg, div;
95 129
96 reg = readl(corediv->reg + CORE_CLK_DIV_RATIO_OFFSET); 130 reg = readl(corediv->reg + soc_desc->ratio_offset);
97 div = (reg >> desc->offset) & desc->mask; 131 div = (reg >> desc->offset) & desc->mask;
98 return parent_rate / div; 132 return parent_rate / div;
99} 133}
@@ -117,7 +151,8 @@ static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
117 unsigned long parent_rate) 151 unsigned long parent_rate)
118{ 152{
119 struct clk_corediv *corediv = to_corediv_clk(hwclk); 153 struct clk_corediv *corediv = to_corediv_clk(hwclk);
120 struct clk_corediv_desc *desc = &corediv->desc; 154 const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
155 const struct clk_corediv_desc *desc = corediv->desc;
121 unsigned long flags = 0; 156 unsigned long flags = 0;
122 u32 reg, div; 157 u32 reg, div;
123 158
@@ -126,17 +161,17 @@ static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
126 spin_lock_irqsave(&corediv->lock, flags); 161 spin_lock_irqsave(&corediv->lock, flags);
127 162
128 /* Write new divider to the divider ratio register */ 163 /* Write new divider to the divider ratio register */
129 reg = readl(corediv->reg + CORE_CLK_DIV_RATIO_OFFSET); 164 reg = readl(corediv->reg + soc_desc->ratio_offset);
130 reg &= ~(desc->mask << desc->offset); 165 reg &= ~(desc->mask << desc->offset);
131 reg |= (div & desc->mask) << desc->offset; 166 reg |= (div & desc->mask) << desc->offset;
132 writel(reg, corediv->reg + CORE_CLK_DIV_RATIO_OFFSET); 167 writel(reg, corediv->reg + soc_desc->ratio_offset);
133 168
134 /* Set reload-force for this clock */ 169 /* Set reload-force for this clock */
135 reg = readl(corediv->reg) | BIT(desc->fieldbit); 170 reg = readl(corediv->reg) | BIT(desc->fieldbit);
136 writel(reg, corediv->reg); 171 writel(reg, corediv->reg);
137 172
138 /* Now trigger the clock update */ 173 /* Now trigger the clock update */
139 reg = readl(corediv->reg) | CORE_CLK_DIV_RATIO_RELOAD; 174 reg = readl(corediv->reg) | soc_desc->ratio_reload;
140 writel(reg, corediv->reg); 175 writel(reg, corediv->reg);
141 176
142 /* 177 /*
@@ -144,7 +179,7 @@ static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
144 * ratios request and the reload request. 179 * ratios request and the reload request.
145 */ 180 */
146 udelay(1000); 181 udelay(1000);
147 reg &= ~(CORE_CLK_DIV_RATIO_MASK | CORE_CLK_DIV_RATIO_RELOAD); 182 reg &= ~(CORE_CLK_DIV_RATIO_MASK | soc_desc->ratio_reload);
148 writel(reg, corediv->reg); 183 writel(reg, corediv->reg);
149 udelay(1000); 184 udelay(1000);
150 185
@@ -153,16 +188,53 @@ static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
153 return 0; 188 return 0;
154} 189}
155 190
156static const struct clk_ops corediv_ops = { 191static const struct clk_corediv_soc_desc armada370_corediv_soc = {
157 .enable = clk_corediv_enable, 192 .descs = mvebu_corediv_desc,
158 .disable = clk_corediv_disable, 193 .ndescs = ARRAY_SIZE(mvebu_corediv_desc),
159 .is_enabled = clk_corediv_is_enabled, 194 .ops = {
160 .recalc_rate = clk_corediv_recalc_rate, 195 .enable = clk_corediv_enable,
161 .round_rate = clk_corediv_round_rate, 196 .disable = clk_corediv_disable,
162 .set_rate = clk_corediv_set_rate, 197 .is_enabled = clk_corediv_is_enabled,
198 .recalc_rate = clk_corediv_recalc_rate,
199 .round_rate = clk_corediv_round_rate,
200 .set_rate = clk_corediv_set_rate,
201 },
202 .ratio_reload = BIT(8),
203 .enable_bit_offset = 24,
204 .ratio_offset = 0x8,
205};
206
207static const struct clk_corediv_soc_desc armada380_corediv_soc = {
208 .descs = mvebu_corediv_desc,
209 .ndescs = ARRAY_SIZE(mvebu_corediv_desc),
210 .ops = {
211 .enable = clk_corediv_enable,
212 .disable = clk_corediv_disable,
213 .is_enabled = clk_corediv_is_enabled,
214 .recalc_rate = clk_corediv_recalc_rate,
215 .round_rate = clk_corediv_round_rate,
216 .set_rate = clk_corediv_set_rate,
217 },
218 .ratio_reload = BIT(8),
219 .enable_bit_offset = 16,
220 .ratio_offset = 0x4,
163}; 221};
164 222
165static void __init mvebu_corediv_clk_init(struct device_node *node) 223static const struct clk_corediv_soc_desc armada375_corediv_soc = {
224 .descs = mvebu_corediv_desc,
225 .ndescs = ARRAY_SIZE(mvebu_corediv_desc),
226 .ops = {
227 .recalc_rate = clk_corediv_recalc_rate,
228 .round_rate = clk_corediv_round_rate,
229 .set_rate = clk_corediv_set_rate,
230 },
231 .ratio_reload = BIT(8),
232 .ratio_offset = 0x4,
233};
234
235static void __init
236mvebu_corediv_clk_init(struct device_node *node,
237 const struct clk_corediv_soc_desc *soc_desc)
166{ 238{
167 struct clk_init_data init; 239 struct clk_init_data init;
168 struct clk_corediv *corediv; 240 struct clk_corediv *corediv;
@@ -178,7 +250,7 @@ static void __init mvebu_corediv_clk_init(struct device_node *node)
178 250
179 parent_name = of_clk_get_parent_name(node, 0); 251 parent_name = of_clk_get_parent_name(node, 0);
180 252
181 clk_data.clk_num = ARRAY_SIZE(mvebu_corediv_desc); 253 clk_data.clk_num = soc_desc->ndescs;
182 254
183 /* clks holds the clock array */ 255 /* clks holds the clock array */
184 clks = kcalloc(clk_data.clk_num, sizeof(struct clk *), 256 clks = kcalloc(clk_data.clk_num, sizeof(struct clk *),
@@ -199,10 +271,11 @@ static void __init mvebu_corediv_clk_init(struct device_node *node)
199 init.num_parents = 1; 271 init.num_parents = 1;
200 init.parent_names = &parent_name; 272 init.parent_names = &parent_name;
201 init.name = clk_name; 273 init.name = clk_name;
202 init.ops = &corediv_ops; 274 init.ops = &soc_desc->ops;
203 init.flags = 0; 275 init.flags = 0;
204 276
205 corediv[i].desc = mvebu_corediv_desc[i]; 277 corediv[i].soc_desc = soc_desc;
278 corediv[i].desc = soc_desc->descs + i;
206 corediv[i].reg = base; 279 corediv[i].reg = base;
207 corediv[i].hw.init = &init; 280 corediv[i].hw.init = &init;
208 281
@@ -219,5 +292,24 @@ err_free_clks:
219err_unmap: 292err_unmap:
220 iounmap(base); 293 iounmap(base);
221} 294}
222CLK_OF_DECLARE(mvebu_corediv_clk, "marvell,armada-370-corediv-clock", 295
223 mvebu_corediv_clk_init); 296static void __init armada370_corediv_clk_init(struct device_node *node)
297{
298 return mvebu_corediv_clk_init(node, &armada370_corediv_soc);
299}
300CLK_OF_DECLARE(armada370_corediv_clk, "marvell,armada-370-corediv-clock",
301 armada370_corediv_clk_init);
302
303static void __init armada375_corediv_clk_init(struct device_node *node)
304{
305 return mvebu_corediv_clk_init(node, &armada375_corediv_soc);
306}
307CLK_OF_DECLARE(armada375_corediv_clk, "marvell,armada-375-corediv-clock",
308 armada375_corediv_clk_init);
309
310static void __init armada380_corediv_clk_init(struct device_node *node)
311{
312 return mvebu_corediv_clk_init(node, &armada380_corediv_soc);
313}
314CLK_OF_DECLARE(armada380_corediv_clk, "marvell,armada-380-corediv-clock",
315 armada380_corediv_clk_init);
diff --git a/drivers/clk/shmobile/Makefile b/drivers/clk/shmobile/Makefile
index 9ecef140dba7..5404cb931ebf 100644
--- a/drivers/clk/shmobile/Makefile
+++ b/drivers/clk/shmobile/Makefile
@@ -1,4 +1,5 @@
1obj-$(CONFIG_ARCH_EMEV2) += clk-emev2.o 1obj-$(CONFIG_ARCH_EMEV2) += clk-emev2.o
2obj-$(CONFIG_ARCH_R7S72100) += clk-rz.o
2obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o 3obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o
3obj-$(CONFIG_ARCH_R8A7791) += clk-rcar-gen2.o 4obj-$(CONFIG_ARCH_R8A7791) += clk-rcar-gen2.o
4obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += clk-div6.o 5obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += clk-div6.o
diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/shmobile/clk-div6.c
index aac4756ec52e..f065f694cb65 100644
--- a/drivers/clk/shmobile/clk-div6.c
+++ b/drivers/clk/shmobile/clk-div6.c
@@ -23,7 +23,7 @@
23#define CPG_DIV6_DIV_MASK 0x3f 23#define CPG_DIV6_DIV_MASK 0x3f
24 24
25/** 25/**
26 * struct div6_clock - MSTP gating clock 26 * struct div6_clock - CPG 6 bit divider clock
27 * @hw: handle between common and hardware-specific interfaces 27 * @hw: handle between common and hardware-specific interfaces
28 * @reg: IO-remapped register 28 * @reg: IO-remapped register
29 * @div: divisor value (1-64) 29 * @div: divisor value (1-64)
diff --git a/drivers/clk/shmobile/clk-mstp.c b/drivers/clk/shmobile/clk-mstp.c
index 42d5912b1d25..2e5810c88d11 100644
--- a/drivers/clk/shmobile/clk-mstp.c
+++ b/drivers/clk/shmobile/clk-mstp.c
@@ -137,7 +137,7 @@ cpg_mstp_clock_register(const char *name, const char *parent_name,
137 137
138 init.name = name; 138 init.name = name;
139 init.ops = &cpg_mstp_clock_ops; 139 init.ops = &cpg_mstp_clock_ops;
140 init.flags = CLK_IS_BASIC; 140 init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
141 init.parent_names = &parent_name; 141 init.parent_names = &parent_name;
142 init.num_parents = 1; 142 init.num_parents = 1;
143 143
diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c
index 99c27b1c625b..dff7f79a19b9 100644
--- a/drivers/clk/shmobile/clk-rcar-gen2.c
+++ b/drivers/clk/shmobile/clk-rcar-gen2.c
@@ -242,22 +242,22 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
242 parent_name = "main"; 242 parent_name = "main";
243 mult = config->pll3_mult; 243 mult = config->pll3_mult;
244 } else if (!strcmp(name, "lb")) { 244 } else if (!strcmp(name, "lb")) {
245 parent_name = "pll1_div2"; 245 parent_name = "pll1";
246 div = cpg_mode & BIT(18) ? 36 : 24; 246 div = cpg_mode & BIT(18) ? 36 : 24;
247 } else if (!strcmp(name, "qspi")) { 247 } else if (!strcmp(name, "qspi")) {
248 parent_name = "pll1_div2"; 248 parent_name = "pll1_div2";
249 div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2) 249 div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2)
250 ? 8 : 10; 250 ? 8 : 10;
251 } else if (!strcmp(name, "sdh")) { 251 } else if (!strcmp(name, "sdh")) {
252 parent_name = "pll1_div2"; 252 parent_name = "pll1";
253 table = cpg_sdh_div_table; 253 table = cpg_sdh_div_table;
254 shift = 8; 254 shift = 8;
255 } else if (!strcmp(name, "sd0")) { 255 } else if (!strcmp(name, "sd0")) {
256 parent_name = "pll1_div2"; 256 parent_name = "pll1";
257 table = cpg_sd01_div_table; 257 table = cpg_sd01_div_table;
258 shift = 4; 258 shift = 4;
259 } else if (!strcmp(name, "sd1")) { 259 } else if (!strcmp(name, "sd1")) {
260 parent_name = "pll1_div2"; 260 parent_name = "pll1";
261 table = cpg_sd01_div_table; 261 table = cpg_sd01_div_table;
262 shift = 0; 262 shift = 0;
263 } else if (!strcmp(name, "z")) { 263 } else if (!strcmp(name, "z")) {
diff --git a/drivers/clk/shmobile/clk-rz.c b/drivers/clk/shmobile/clk-rz.c
new file mode 100644
index 000000000000..7e68e8630962
--- /dev/null
+++ b/drivers/clk/shmobile/clk-rz.c
@@ -0,0 +1,103 @@
1/*
2 * rz Core CPG Clocks
3 *
4 * Copyright (C) 2013 Ideas On Board SPRL
5 * Copyright (C) 2014 Wolfram Sang, Sang Engineering <wsa@sang-engineering.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 */
11
12#include <linux/clk-provider.h>
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/of.h>
16#include <linux/of_address.h>
17#include <linux/slab.h>
18
19struct rz_cpg {
20 struct clk_onecell_data data;
21 void __iomem *reg;
22};
23
24#define CPG_FRQCR 0x10
25#define CPG_FRQCR2 0x14
26
27/* -----------------------------------------------------------------------------
28 * Initialization
29 */
30
31static struct clk * __init
32rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *name)
33{
34 u32 val;
35 unsigned mult;
36 static const unsigned frqcr_tab[4] = { 3, 2, 0, 1 };
37
38 if (strcmp(name, "pll") == 0) {
39 /* FIXME: cpg_mode should be read from GPIO. But no GPIO support yet */
40 unsigned cpg_mode = 0; /* hardcoded to EXTAL for now */
41 const char *parent_name = of_clk_get_parent_name(np, cpg_mode);
42
43 mult = cpg_mode ? (32 / 4) : 30;
44
45 return clk_register_fixed_factor(NULL, name, parent_name, 0, mult, 1);
46 }
47
48 /* If mapping regs failed, skip non-pll clocks. System will boot anyhow */
49 if (!cpg->reg)
50 return ERR_PTR(-ENXIO);
51
 52	/* FIXME: "i" and "g" are variable clocks with non-integer dividers (e.g. 2/3)
 53	 * and the constraint that g <= i must always hold. To get the RZ platform
 54	 * started, let them run at their current fixed speed and fill in the details later.
 55	 */
56 if (strcmp(name, "i") == 0)
57 val = (clk_readl(cpg->reg + CPG_FRQCR) >> 8) & 3;
58 else if (strcmp(name, "g") == 0)
59 val = clk_readl(cpg->reg + CPG_FRQCR2) & 3;
60 else
61 return ERR_PTR(-EINVAL);
62
63 mult = frqcr_tab[val];
64 return clk_register_fixed_factor(NULL, name, "pll", 0, mult, 3);
65}
66
67static void __init rz_cpg_clocks_init(struct device_node *np)
68{
69 struct rz_cpg *cpg;
70 struct clk **clks;
71 unsigned i;
72 int num_clks;
73
74 num_clks = of_property_count_strings(np, "clock-output-names");
75 if (WARN(num_clks <= 0, "can't count CPG clocks\n"))
76 return;
77
78 cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
79 clks = kzalloc(num_clks * sizeof(*clks), GFP_KERNEL);
80 BUG_ON(!cpg || !clks);
81
82 cpg->data.clks = clks;
83 cpg->data.clk_num = num_clks;
84
85 cpg->reg = of_iomap(np, 0);
86
87 for (i = 0; i < num_clks; ++i) {
88 const char *name;
89 struct clk *clk;
90
91 of_property_read_string_index(np, "clock-output-names", i, &name);
92
93 clk = rz_cpg_register_clock(np, cpg, name);
94 if (IS_ERR(clk))
95 pr_err("%s: failed to register %s %s clock (%ld)\n",
96 __func__, np->name, name, PTR_ERR(clk));
97 else
98 cpg->data.clks[i] = clk;
99 }
100
101 of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
102}
103CLK_OF_DECLARE(rz_cpg_clks, "renesas,rz-cpg-clocks", rz_cpg_clocks_init);
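
The new RZ driver derives the "i" and "g" clocks as fixed factors of the PLL: the multiplier comes from frqcr_tab and the result is divided by three. A standalone sketch of that arithmetic for the "i" clock, assuming a hypothetical 13.33 MHz EXTAL (the driver takes the real value from the parent clock):

/*
 * Editorial sketch (not part of the patch): reproduces the fixed-factor
 * maths of rz_cpg_register_clock() above. The EXTAL value is invented.
 */
#include <stdio.h>

int main(void)
{
	static const unsigned frqcr_tab[4] = { 3, 2, 0, 1 };
	unsigned long extal = 13330000;		/* hypothetical EXTAL */
	unsigned long pll = extal * 30;		/* cpg_mode == 0 path */
	unsigned val;

	for (val = 0; val < 4; val++) {
		unsigned mult = frqcr_tab[val];

		/* clk_register_fixed_factor(..., mult, 3): rate = pll * mult / 3 */
		printf("FRQCR[9:8]=%u -> i clock %lu Hz\n",
		       val, pll * mult / 3);	/* entry 2 is 0, as in the table */
	}
	return 0;
}
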
diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c
index f9f4a15a64ab..d63b76ca60c3 100644
--- a/drivers/clk/sirf/clk-atlas6.c
+++ b/drivers/clk/sirf/clk-atlas6.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * Clock tree for CSR SiRFatlasVI 2 * Clock tree for CSR SiRFatlasVI
3 * 3 *
4 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. 4 * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
5 * company.
5 * 6 *
6 * Licensed under GPLv2 or later. 7 * Licensed under GPLv2 or later.
7 */ 8 */
diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c
index 7dde6a82f514..37af51c5f213 100644
--- a/drivers/clk/sirf/clk-common.c
+++ b/drivers/clk/sirf/clk-common.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * common clks module for all SiRF SoCs 2 * common clks module for all SiRF SoCs
3 * 3 *
4 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. 4 * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
5 * company.
5 * 6 *
6 * Licensed under GPLv2 or later. 7 * Licensed under GPLv2 or later.
7 */ 8 */
diff --git a/drivers/clk/sirf/clk-prima2.c b/drivers/clk/sirf/clk-prima2.c
index 7adc5c70c7ff..6968e2ebcd8a 100644
--- a/drivers/clk/sirf/clk-prima2.c
+++ b/drivers/clk/sirf/clk-prima2.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * Clock tree for CSR SiRFprimaII 2 * Clock tree for CSR SiRFprimaII
3 * 3 *
4 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. 4 * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
5 * company.
5 * 6 *
6 * Licensed under GPLv2 or later. 7 * Licensed under GPLv2 or later.
7 */ 8 */
diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile
index 0303c0b99cd0..7e2d15a0c7b8 100644
--- a/drivers/clk/socfpga/Makefile
+++ b/drivers/clk/socfpga/Makefile
@@ -1 +1,4 @@
1obj-y += clk.o 1obj-y += clk.o
2obj-y += clk-gate.o
3obj-y += clk-pll.o
4obj-y += clk-periph.o
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
new file mode 100644
index 000000000000..501d513bf890
--- /dev/null
+++ b/drivers/clk/socfpga/clk-gate.c
@@ -0,0 +1,263 @@
1/*
2 * Copyright 2011-2012 Calxeda, Inc.
3 * Copyright (C) 2012-2013 Altera Corporation <www.altera.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
 15 * Based on clk-highbank.c
16 *
17 */
18#include <linux/clk.h>
19#include <linux/clkdev.h>
20#include <linux/clk-provider.h>
21#include <linux/io.h>
22#include <linux/mfd/syscon.h>
23#include <linux/of.h>
24#include <linux/regmap.h>
25
26#include "clk.h"
27
28#define SOCFPGA_L4_MP_CLK "l4_mp_clk"
29#define SOCFPGA_L4_SP_CLK "l4_sp_clk"
30#define SOCFPGA_NAND_CLK "nand_clk"
31#define SOCFPGA_NAND_X_CLK "nand_x_clk"
32#define SOCFPGA_MMC_CLK "sdmmc_clk"
33#define SOCFPGA_GPIO_DB_CLK_OFFSET 0xA8
34
35#define div_mask(width) ((1 << (width)) - 1)
36#define streq(a, b) (strcmp((a), (b)) == 0)
37
38#define to_socfpga_gate_clk(p) container_of(p, struct socfpga_gate_clk, hw.hw)
39
40/* SDMMC Group for System Manager defines */
41#define SYSMGR_SDMMCGRP_CTRL_OFFSET 0x108
42#define SYSMGR_SDMMC_CTRL_SET(smplsel, drvsel) \
43 ((((smplsel) & 0x7) << 3) | (((drvsel) & 0x7) << 0))
44
45static u8 socfpga_clk_get_parent(struct clk_hw *hwclk)
46{
47 u32 l4_src;
48 u32 perpll_src;
49
50 if (streq(hwclk->init->name, SOCFPGA_L4_MP_CLK)) {
51 l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
52 return l4_src &= 0x1;
53 }
54 if (streq(hwclk->init->name, SOCFPGA_L4_SP_CLK)) {
55 l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
56 return !!(l4_src & 2);
57 }
58
59 perpll_src = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC);
60 if (streq(hwclk->init->name, SOCFPGA_MMC_CLK))
61 return perpll_src &= 0x3;
62 if (streq(hwclk->init->name, SOCFPGA_NAND_CLK) ||
63 streq(hwclk->init->name, SOCFPGA_NAND_X_CLK))
64 return (perpll_src >> 2) & 3;
65
66 /* QSPI clock */
67 return (perpll_src >> 4) & 3;
68
69}
70
71static int socfpga_clk_set_parent(struct clk_hw *hwclk, u8 parent)
72{
73 u32 src_reg;
74
75 if (streq(hwclk->init->name, SOCFPGA_L4_MP_CLK)) {
76 src_reg = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
77 src_reg &= ~0x1;
78 src_reg |= parent;
79 writel(src_reg, clk_mgr_base_addr + CLKMGR_L4SRC);
80 } else if (streq(hwclk->init->name, SOCFPGA_L4_SP_CLK)) {
81 src_reg = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
82 src_reg &= ~0x2;
83 src_reg |= (parent << 1);
84 writel(src_reg, clk_mgr_base_addr + CLKMGR_L4SRC);
85 } else {
86 src_reg = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC);
87 if (streq(hwclk->init->name, SOCFPGA_MMC_CLK)) {
88 src_reg &= ~0x3;
89 src_reg |= parent;
90 } else if (streq(hwclk->init->name, SOCFPGA_NAND_CLK) ||
91 streq(hwclk->init->name, SOCFPGA_NAND_X_CLK)) {
92 src_reg &= ~0xC;
93 src_reg |= (parent << 2);
94 } else {/* QSPI clock */
95 src_reg &= ~0x30;
96 src_reg |= (parent << 4);
97 }
98 writel(src_reg, clk_mgr_base_addr + CLKMGR_PERPLL_SRC);
99 }
100
101 return 0;
102}
103
104static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
105 unsigned long parent_rate)
106{
107 struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
108 u32 div = 1, val;
109
110 if (socfpgaclk->fixed_div)
111 div = socfpgaclk->fixed_div;
112 else if (socfpgaclk->div_reg) {
113 val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
114 val &= div_mask(socfpgaclk->width);
115 /* Check for GPIO_DB_CLK by its offset */
116 if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
117 div = val + 1;
118 else
119 div = (1 << val);
120 }
121
122 return parent_rate / div;
123}
124
125static int socfpga_clk_prepare(struct clk_hw *hwclk)
126{
127 struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
128 struct regmap *sys_mgr_base_addr;
129 int i;
130 u32 hs_timing;
131 u32 clk_phase[2];
132
133 if (socfpgaclk->clk_phase[0] || socfpgaclk->clk_phase[1]) {
134 sys_mgr_base_addr = syscon_regmap_lookup_by_compatible("altr,sys-mgr");
135 if (IS_ERR(sys_mgr_base_addr)) {
136 pr_err("%s: failed to find altr,sys-mgr regmap!\n", __func__);
137 return -EINVAL;
138 }
139
140 for (i = 0; i < 2; i++) {
141 switch (socfpgaclk->clk_phase[i]) {
142 case 0:
143 clk_phase[i] = 0;
144 break;
145 case 45:
146 clk_phase[i] = 1;
147 break;
148 case 90:
149 clk_phase[i] = 2;
150 break;
151 case 135:
152 clk_phase[i] = 3;
153 break;
154 case 180:
155 clk_phase[i] = 4;
156 break;
157 case 225:
158 clk_phase[i] = 5;
159 break;
160 case 270:
161 clk_phase[i] = 6;
162 break;
163 case 315:
164 clk_phase[i] = 7;
165 break;
166 default:
167 clk_phase[i] = 0;
168 break;
169 }
170 }
171 hs_timing = SYSMGR_SDMMC_CTRL_SET(clk_phase[0], clk_phase[1]);
172 regmap_write(sys_mgr_base_addr, SYSMGR_SDMMCGRP_CTRL_OFFSET,
173 hs_timing);
174 }
175 return 0;
176}
177
178static struct clk_ops gateclk_ops = {
179 .prepare = socfpga_clk_prepare,
180 .recalc_rate = socfpga_clk_recalc_rate,
181 .get_parent = socfpga_clk_get_parent,
182 .set_parent = socfpga_clk_set_parent,
183};
184
185static void __init __socfpga_gate_init(struct device_node *node,
186 const struct clk_ops *ops)
187{
188 u32 clk_gate[2];
189 u32 div_reg[3];
190 u32 clk_phase[2];
191 u32 fixed_div;
192 struct clk *clk;
193 struct socfpga_gate_clk *socfpga_clk;
194 const char *clk_name = node->name;
195 const char *parent_name[SOCFPGA_MAX_PARENTS];
196 struct clk_init_data init;
197 int rc;
198 int i = 0;
199
200 socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
201 if (WARN_ON(!socfpga_clk))
202 return;
203
204 rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2);
205 if (rc)
206 clk_gate[0] = 0;
207
208 if (clk_gate[0]) {
209 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
210 socfpga_clk->hw.bit_idx = clk_gate[1];
211
212 gateclk_ops.enable = clk_gate_ops.enable;
213 gateclk_ops.disable = clk_gate_ops.disable;
214 }
215
216 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
217 if (rc)
218 socfpga_clk->fixed_div = 0;
219 else
220 socfpga_clk->fixed_div = fixed_div;
221
222 rc = of_property_read_u32_array(node, "div-reg", div_reg, 3);
223 if (!rc) {
224 socfpga_clk->div_reg = clk_mgr_base_addr + div_reg[0];
225 socfpga_clk->shift = div_reg[1];
226 socfpga_clk->width = div_reg[2];
227 } else {
228 socfpga_clk->div_reg = 0;
229 }
230
231 rc = of_property_read_u32_array(node, "clk-phase", clk_phase, 2);
232 if (!rc) {
233 socfpga_clk->clk_phase[0] = clk_phase[0];
234 socfpga_clk->clk_phase[1] = clk_phase[1];
235 }
236
237 of_property_read_string(node, "clock-output-names", &clk_name);
238
239 init.name = clk_name;
240 init.ops = ops;
241 init.flags = 0;
242 while (i < SOCFPGA_MAX_PARENTS && (parent_name[i] =
243 of_clk_get_parent_name(node, i)) != NULL)
244 i++;
245
246 init.parent_names = parent_name;
247 init.num_parents = i;
248 socfpga_clk->hw.hw.init = &init;
249
250 clk = clk_register(NULL, &socfpga_clk->hw.hw);
251 if (WARN_ON(IS_ERR(clk))) {
252 kfree(socfpga_clk);
253 return;
254 }
255 rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
256 if (WARN_ON(rc))
257 return;
258}
259
260void __init socfpga_gate_init(struct device_node *node)
261{
262 __socfpga_gate_init(node, &gateclk_ops);
263}
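
socfpga_clk_prepare() above converts the two "clk-phase" angles (multiples of 45 degrees) into 3-bit fields and packs them into the System Manager SDMMC control word. A standalone sketch of that mapping, with an arbitrary 135/90 degree pair chosen purely for illustration:

/*
 * Editorial sketch (not part of the patch): mirrors the degree-to-field
 * switch and the SYSMGR_SDMMC_CTRL_SET() packing used above.
 */
#include <stdio.h>

#define SYSMGR_SDMMC_CTRL_SET(smplsel, drvsel) \
	((((smplsel) & 0x7) << 3) | (((drvsel) & 0x7) << 0))

static unsigned degrees_to_field(unsigned degrees)
{
	/* Valid phases are multiples of 45 below 360; anything else maps to 0. */
	return (degrees % 45 == 0 && degrees < 360) ? degrees / 45 : 0;
}

int main(void)
{
	unsigned clk_phase[2] = { 135, 90 };	/* smplsel, drvsel (example) */
	unsigned hs_timing = SYSMGR_SDMMC_CTRL_SET(degrees_to_field(clk_phase[0]),
						   degrees_to_field(clk_phase[1]));

	printf("hs_timing = 0x%x\n", hs_timing);	/* 0x1a for 135/90 */
	return 0;
}
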
diff --git a/drivers/clk/socfpga/clk-periph.c b/drivers/clk/socfpga/clk-periph.c
new file mode 100644
index 000000000000..81623a3736f9
--- /dev/null
+++ b/drivers/clk/socfpga/clk-periph.c
@@ -0,0 +1,94 @@
1/*
2 * Copyright 2011-2012 Calxeda, Inc.
3 * Copyright (C) 2012-2013 Altera Corporation <www.altera.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
 15 * Based on clk-highbank.c
16 *
17 */
18#include <linux/clk.h>
19#include <linux/clkdev.h>
20#include <linux/clk-provider.h>
21#include <linux/io.h>
22#include <linux/of.h>
23
24#include "clk.h"
25
26#define to_socfpga_periph_clk(p) container_of(p, struct socfpga_periph_clk, hw.hw)
27
28static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
29 unsigned long parent_rate)
30{
31 struct socfpga_periph_clk *socfpgaclk = to_socfpga_periph_clk(hwclk);
32 u32 div;
33
34 if (socfpgaclk->fixed_div)
35 div = socfpgaclk->fixed_div;
36 else
37 div = ((readl(socfpgaclk->hw.reg) & 0x1ff) + 1);
38
39 return parent_rate / div;
40}
41
42static const struct clk_ops periclk_ops = {
43 .recalc_rate = clk_periclk_recalc_rate,
44};
45
46static __init void __socfpga_periph_init(struct device_node *node,
47 const struct clk_ops *ops)
48{
49 u32 reg;
50 struct clk *clk;
51 struct socfpga_periph_clk *periph_clk;
52 const char *clk_name = node->name;
53 const char *parent_name;
54 struct clk_init_data init;
55 int rc;
56 u32 fixed_div;
57
58 of_property_read_u32(node, "reg", &reg);
59
60 periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
61 if (WARN_ON(!periph_clk))
62 return;
63
64 periph_clk->hw.reg = clk_mgr_base_addr + reg;
65
66 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
67 if (rc)
68 periph_clk->fixed_div = 0;
69 else
70 periph_clk->fixed_div = fixed_div;
71
72 of_property_read_string(node, "clock-output-names", &clk_name);
73
74 init.name = clk_name;
75 init.ops = ops;
76 init.flags = 0;
77 parent_name = of_clk_get_parent_name(node, 0);
78 init.parent_names = &parent_name;
79 init.num_parents = 1;
80
81 periph_clk->hw.hw.init = &init;
82
83 clk = clk_register(NULL, &periph_clk->hw.hw);
84 if (WARN_ON(IS_ERR(clk))) {
85 kfree(periph_clk);
86 return;
87 }
88 rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
89}
90
91void __init socfpga_periph_init(struct device_node *node)
92{
93 __socfpga_periph_init(node, &periclk_ops);
94}
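
clk_periclk_recalc_rate() above divides the parent rate either by the optional fixed divider or by the 9-bit register field plus one. A minimal sketch with invented register and parent values:

/*
 * Editorial sketch (not part of the patch): the register and parent
 * rate below are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned long parent_rate = 400000000;	/* hypothetical */
	unsigned reg = 0x3;			/* hypothetical divider field */
	unsigned fixed_div = 0;			/* 0 means "use the register" */
	unsigned div = fixed_div ? fixed_div : (reg & 0x1ff) + 1;

	printf("periph rate = %lu Hz\n", parent_rate / div);	/* 100 MHz */
	return 0;
}
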
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
new file mode 100644
index 000000000000..88dafb5e9627
--- /dev/null
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -0,0 +1,131 @@
1/*
2 * Copyright 2011-2012 Calxeda, Inc.
3 * Copyright (C) 2012-2013 Altera Corporation <www.altera.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
 15 * Based on clk-highbank.c
16 *
17 */
18#include <linux/clk.h>
19#include <linux/clkdev.h>
20#include <linux/clk-provider.h>
21#include <linux/io.h>
22#include <linux/of.h>
23
24#include "clk.h"
25
26/* Clock bypass bits */
27#define MAINPLL_BYPASS (1<<0)
28#define SDRAMPLL_BYPASS (1<<1)
29#define SDRAMPLL_SRC_BYPASS (1<<2)
30#define PERPLL_BYPASS (1<<3)
31#define PERPLL_SRC_BYPASS (1<<4)
32
33#define SOCFPGA_PLL_BG_PWRDWN 0
34#define SOCFPGA_PLL_EXT_ENA 1
35#define SOCFPGA_PLL_PWR_DOWN 2
36#define SOCFPGA_PLL_DIVF_MASK 0x0000FFF8
37#define SOCFPGA_PLL_DIVF_SHIFT 3
38#define SOCFPGA_PLL_DIVQ_MASK 0x003F0000
39#define SOCFPGA_PLL_DIVQ_SHIFT 16
40
41#define CLK_MGR_PLL_CLK_SRC_SHIFT 22
42#define CLK_MGR_PLL_CLK_SRC_MASK 0x3
43
44#define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)
45
46static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
47 unsigned long parent_rate)
48{
49 struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
50 unsigned long divf, divq, reg;
51 unsigned long long vco_freq;
52 unsigned long bypass;
53
54 reg = readl(socfpgaclk->hw.reg);
55 bypass = readl(clk_mgr_base_addr + CLKMGR_BYPASS);
56 if (bypass & MAINPLL_BYPASS)
57 return parent_rate;
58
59 divf = (reg & SOCFPGA_PLL_DIVF_MASK) >> SOCFPGA_PLL_DIVF_SHIFT;
60 divq = (reg & SOCFPGA_PLL_DIVQ_MASK) >> SOCFPGA_PLL_DIVQ_SHIFT;
61 vco_freq = (unsigned long long)parent_rate * (divf + 1);
62 do_div(vco_freq, (1 + divq));
63 return (unsigned long)vco_freq;
64}
65
66static u8 clk_pll_get_parent(struct clk_hw *hwclk)
67{
68 u32 pll_src;
69 struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
70
71 pll_src = readl(socfpgaclk->hw.reg);
72 return (pll_src >> CLK_MGR_PLL_CLK_SRC_SHIFT) &
73 CLK_MGR_PLL_CLK_SRC_MASK;
74}
75
76static struct clk_ops clk_pll_ops = {
77 .recalc_rate = clk_pll_recalc_rate,
78 .get_parent = clk_pll_get_parent,
79};
80
81static __init struct clk *__socfpga_pll_init(struct device_node *node,
82 const struct clk_ops *ops)
83{
84 u32 reg;
85 struct clk *clk;
86 struct socfpga_pll *pll_clk;
87 const char *clk_name = node->name;
88 const char *parent_name[SOCFPGA_MAX_PARENTS];
89 struct clk_init_data init;
90 int rc;
91 int i = 0;
92
93 of_property_read_u32(node, "reg", &reg);
94
95 pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
96 if (WARN_ON(!pll_clk))
97 return NULL;
98
99 pll_clk->hw.reg = clk_mgr_base_addr + reg;
100
101 of_property_read_string(node, "clock-output-names", &clk_name);
102
103 init.name = clk_name;
104 init.ops = ops;
105 init.flags = 0;
106
107 while (i < SOCFPGA_MAX_PARENTS && (parent_name[i] =
108 of_clk_get_parent_name(node, i)) != NULL)
109 i++;
110
111 init.num_parents = i;
112 init.parent_names = parent_name;
113 pll_clk->hw.hw.init = &init;
114
115 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
116 clk_pll_ops.enable = clk_gate_ops.enable;
117 clk_pll_ops.disable = clk_gate_ops.disable;
118
119 clk = clk_register(NULL, &pll_clk->hw.hw);
120 if (WARN_ON(IS_ERR(clk))) {
121 kfree(pll_clk);
122 return NULL;
123 }
124 rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
125 return clk;
126}
127
128void __init socfpga_pll_init(struct device_node *node)
129{
130 __socfpga_pll_init(node, &clk_pll_ops);
131}
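
clk_pll_recalc_rate() above decodes DIVF and DIVQ from the PLL control register and computes VCO = parent x (divf + 1) / (divq + 1). A standalone sketch with an assumed 25 MHz parent and an invented register value:

/*
 * Editorial sketch (not part of the patch): decodes DIVF/DIVQ with the
 * masks defined above. Register and parent values are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

#define SOCFPGA_PLL_DIVF_MASK	0x0000FFF8
#define SOCFPGA_PLL_DIVF_SHIFT	3
#define SOCFPGA_PLL_DIVQ_MASK	0x003F0000
#define SOCFPGA_PLL_DIVQ_SHIFT	16

int main(void)
{
	unsigned long parent_rate = 25000000;	/* hypothetical parent */
	uint32_t reg = (39 << SOCFPGA_PLL_DIVF_SHIFT) |	/* divf = 39 */
		       (0 << SOCFPGA_PLL_DIVQ_SHIFT);	/* divq = 0  */
	unsigned long divf = (reg & SOCFPGA_PLL_DIVF_MASK) >> SOCFPGA_PLL_DIVF_SHIFT;
	unsigned long divq = (reg & SOCFPGA_PLL_DIVQ_MASK) >> SOCFPGA_PLL_DIVQ_SHIFT;
	unsigned long long vco = (unsigned long long)parent_rate * (divf + 1);

	vco /= divq + 1;
	printf("VCO = %llu Hz\n", vco);		/* 1 GHz for divf=39, divq=0 */
	return 0;
}
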
diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
index 5983a26a8c5f..35a960a993f9 100644
--- a/drivers/clk/socfpga/clk.c
+++ b/drivers/clk/socfpga/clk.c
@@ -22,325 +22,23 @@
22#include <linux/clk-provider.h> 22#include <linux/clk-provider.h>
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/of.h> 24#include <linux/of.h>
25#include <linux/of_address.h>
25 26
26/* Clock Manager offsets */ 27#include "clk.h"
27#define CLKMGR_CTRL 0x0
28#define CLKMGR_BYPASS 0x4
29#define CLKMGR_L4SRC 0x70
30#define CLKMGR_PERPLL_SRC 0xAC
31 28
32/* Clock bypass bits */ 29void __iomem *clk_mgr_base_addr;
33#define MAINPLL_BYPASS (1<<0)
34#define SDRAMPLL_BYPASS (1<<1)
35#define SDRAMPLL_SRC_BYPASS (1<<2)
36#define PERPLL_BYPASS (1<<3)
37#define PERPLL_SRC_BYPASS (1<<4)
38 30
39#define SOCFPGA_PLL_BG_PWRDWN 0 31static const struct of_device_id socfpga_child_clocks[] __initconst = {
40#define SOCFPGA_PLL_EXT_ENA 1 32 { .compatible = "altr,socfpga-pll-clock", socfpga_pll_init, },
41#define SOCFPGA_PLL_PWR_DOWN 2 33 { .compatible = "altr,socfpga-perip-clk", socfpga_periph_init, },
42#define SOCFPGA_PLL_DIVF_MASK 0x0000FFF8 34 { .compatible = "altr,socfpga-gate-clk", socfpga_gate_init, },
43#define SOCFPGA_PLL_DIVF_SHIFT 3 35 {},
44#define SOCFPGA_PLL_DIVQ_MASK 0x003F0000
45#define SOCFPGA_PLL_DIVQ_SHIFT 16
46#define SOCFGPA_MAX_PARENTS 3
47
48#define SOCFPGA_L4_MP_CLK "l4_mp_clk"
49#define SOCFPGA_L4_SP_CLK "l4_sp_clk"
50#define SOCFPGA_NAND_CLK "nand_clk"
51#define SOCFPGA_NAND_X_CLK "nand_x_clk"
52#define SOCFPGA_MMC_CLK "sdmmc_clk"
53#define SOCFPGA_DB_CLK "gpio_db_clk"
54
55#define div_mask(width) ((1 << (width)) - 1)
56#define streq(a, b) (strcmp((a), (b)) == 0)
57
58extern void __iomem *clk_mgr_base_addr;
59
60struct socfpga_clk {
61 struct clk_gate hw;
62 char *parent_name;
63 char *clk_name;
64 u32 fixed_div;
65 void __iomem *div_reg;
66 u32 width; /* only valid if div_reg != 0 */
67 u32 shift; /* only valid if div_reg != 0 */
68};
69#define to_socfpga_clk(p) container_of(p, struct socfpga_clk, hw.hw)
70
71static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
72 unsigned long parent_rate)
73{
74 struct socfpga_clk *socfpgaclk = to_socfpga_clk(hwclk);
75 unsigned long divf, divq, vco_freq, reg;
76 unsigned long bypass;
77
78 reg = readl(socfpgaclk->hw.reg);
79 bypass = readl(clk_mgr_base_addr + CLKMGR_BYPASS);
80 if (bypass & MAINPLL_BYPASS)
81 return parent_rate;
82
83 divf = (reg & SOCFPGA_PLL_DIVF_MASK) >> SOCFPGA_PLL_DIVF_SHIFT;
84 divq = (reg & SOCFPGA_PLL_DIVQ_MASK) >> SOCFPGA_PLL_DIVQ_SHIFT;
85 vco_freq = parent_rate * (divf + 1);
86 return vco_freq / (1 + divq);
87}
88
89
90static struct clk_ops clk_pll_ops = {
91 .recalc_rate = clk_pll_recalc_rate,
92};
93
94static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
95 unsigned long parent_rate)
96{
97 struct socfpga_clk *socfpgaclk = to_socfpga_clk(hwclk);
98 u32 div;
99
100 if (socfpgaclk->fixed_div)
101 div = socfpgaclk->fixed_div;
102 else
103 div = ((readl(socfpgaclk->hw.reg) & 0x1ff) + 1);
104
105 return parent_rate / div;
106}
107
108static const struct clk_ops periclk_ops = {
109 .recalc_rate = clk_periclk_recalc_rate,
110};
111
112static __init struct clk *socfpga_clk_init(struct device_node *node,
113 const struct clk_ops *ops)
114{
115 u32 reg;
116 struct clk *clk;
117 struct socfpga_clk *socfpga_clk;
118 const char *clk_name = node->name;
119 const char *parent_name;
120 struct clk_init_data init;
121 int rc;
122 u32 fixed_div;
123
124 of_property_read_u32(node, "reg", &reg);
125
126 socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
127 if (WARN_ON(!socfpga_clk))
128 return NULL;
129
130 socfpga_clk->hw.reg = clk_mgr_base_addr + reg;
131
132 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
133 if (rc)
134 socfpga_clk->fixed_div = 0;
135 else
136 socfpga_clk->fixed_div = fixed_div;
137
138 of_property_read_string(node, "clock-output-names", &clk_name);
139
140 init.name = clk_name;
141 init.ops = ops;
142 init.flags = 0;
143 parent_name = of_clk_get_parent_name(node, 0);
144 init.parent_names = &parent_name;
145 init.num_parents = 1;
146
147 socfpga_clk->hw.hw.init = &init;
148
149 if (streq(clk_name, "main_pll") ||
150 streq(clk_name, "periph_pll") ||
151 streq(clk_name, "sdram_pll")) {
152 socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
153 clk_pll_ops.enable = clk_gate_ops.enable;
154 clk_pll_ops.disable = clk_gate_ops.disable;
155 }
156
157 clk = clk_register(NULL, &socfpga_clk->hw.hw);
158 if (WARN_ON(IS_ERR(clk))) {
159 kfree(socfpga_clk);
160 return NULL;
161 }
162 rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
163 return clk;
164}
165
166static u8 socfpga_clk_get_parent(struct clk_hw *hwclk)
167{
168 u32 l4_src;
169 u32 perpll_src;
170
171 if (streq(hwclk->init->name, SOCFPGA_L4_MP_CLK)) {
172 l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
173 return l4_src &= 0x1;
174 }
175 if (streq(hwclk->init->name, SOCFPGA_L4_SP_CLK)) {
176 l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
177 return !!(l4_src & 2);
178 }
179
180 perpll_src = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC);
181 if (streq(hwclk->init->name, SOCFPGA_MMC_CLK))
182 return perpll_src &= 0x3;
183 if (streq(hwclk->init->name, SOCFPGA_NAND_CLK) ||
184 streq(hwclk->init->name, SOCFPGA_NAND_X_CLK))
185 return (perpll_src >> 2) & 3;
186
187 /* QSPI clock */
188 return (perpll_src >> 4) & 3;
189
190}
191
192static int socfpga_clk_set_parent(struct clk_hw *hwclk, u8 parent)
193{
194 u32 src_reg;
195
196 if (streq(hwclk->init->name, SOCFPGA_L4_MP_CLK)) {
197 src_reg = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
198 src_reg &= ~0x1;
199 src_reg |= parent;
200 writel(src_reg, clk_mgr_base_addr + CLKMGR_L4SRC);
201 } else if (streq(hwclk->init->name, SOCFPGA_L4_SP_CLK)) {
202 src_reg = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
203 src_reg &= ~0x2;
204 src_reg |= (parent << 1);
205 writel(src_reg, clk_mgr_base_addr + CLKMGR_L4SRC);
206 } else {
207 src_reg = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC);
208 if (streq(hwclk->init->name, SOCFPGA_MMC_CLK)) {
209 src_reg &= ~0x3;
210 src_reg |= parent;
211 } else if (streq(hwclk->init->name, SOCFPGA_NAND_CLK) ||
212 streq(hwclk->init->name, SOCFPGA_NAND_X_CLK)) {
213 src_reg &= ~0xC;
214 src_reg |= (parent << 2);
215 } else {/* QSPI clock */
216 src_reg &= ~0x30;
217 src_reg |= (parent << 4);
218 }
219 writel(src_reg, clk_mgr_base_addr + CLKMGR_PERPLL_SRC);
220 }
221
222 return 0;
223}
224
225static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
226 unsigned long parent_rate)
227{
228 struct socfpga_clk *socfpgaclk = to_socfpga_clk(hwclk);
229 u32 div = 1, val;
230
231 if (socfpgaclk->fixed_div)
232 div = socfpgaclk->fixed_div;
233 else if (socfpgaclk->div_reg) {
234 val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
235 val &= div_mask(socfpgaclk->width);
236 if (streq(hwclk->init->name, SOCFPGA_DB_CLK))
237 div = val + 1;
238 else
239 div = (1 << val);
240 }
241
242 return parent_rate / div;
243}
244
245static struct clk_ops gateclk_ops = {
246 .recalc_rate = socfpga_clk_recalc_rate,
247 .get_parent = socfpga_clk_get_parent,
248 .set_parent = socfpga_clk_set_parent,
249}; 36};
250 37
251static void __init socfpga_gate_clk_init(struct device_node *node, 38static void __init socfpga_clkmgr_init(struct device_node *node)
252 const struct clk_ops *ops)
253{
254 u32 clk_gate[2];
255 u32 div_reg[3];
256 u32 fixed_div;
257 struct clk *clk;
258 struct socfpga_clk *socfpga_clk;
259 const char *clk_name = node->name;
260 const char *parent_name[SOCFGPA_MAX_PARENTS];
261 struct clk_init_data init;
262 int rc;
263 int i = 0;
264
265 socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
266 if (WARN_ON(!socfpga_clk))
267 return;
268
269 rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2);
270 if (rc)
271 clk_gate[0] = 0;
272
273 if (clk_gate[0]) {
274 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
275 socfpga_clk->hw.bit_idx = clk_gate[1];
276
277 gateclk_ops.enable = clk_gate_ops.enable;
278 gateclk_ops.disable = clk_gate_ops.disable;
279 }
280
281 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
282 if (rc)
283 socfpga_clk->fixed_div = 0;
284 else
285 socfpga_clk->fixed_div = fixed_div;
286
287 rc = of_property_read_u32_array(node, "div-reg", div_reg, 3);
288 if (!rc) {
289 socfpga_clk->div_reg = clk_mgr_base_addr + div_reg[0];
290 socfpga_clk->shift = div_reg[1];
291 socfpga_clk->width = div_reg[2];
292 } else {
293 socfpga_clk->div_reg = NULL;
294 }
295
296 of_property_read_string(node, "clock-output-names", &clk_name);
297
298 init.name = clk_name;
299 init.ops = ops;
300 init.flags = 0;
301 while (i < SOCFGPA_MAX_PARENTS && (parent_name[i] =
302 of_clk_get_parent_name(node, i)) != NULL)
303 i++;
304
305 init.parent_names = parent_name;
306 init.num_parents = i;
307 socfpga_clk->hw.hw.init = &init;
308
309 clk = clk_register(NULL, &socfpga_clk->hw.hw);
310 if (WARN_ON(IS_ERR(clk))) {
311 kfree(socfpga_clk);
312 return;
313 }
314 rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
315 if (WARN_ON(rc))
316 return;
317}
318
319static void __init socfpga_pll_init(struct device_node *node)
320{ 39{
321 socfpga_clk_init(node, &clk_pll_ops); 40 clk_mgr_base_addr = of_iomap(node, 0);
41 of_clk_init(socfpga_child_clocks);
322} 42}
323CLK_OF_DECLARE(socfpga_pll, "altr,socfpga-pll-clock", socfpga_pll_init); 43CLK_OF_DECLARE(socfpga_mgr, "altr,clk-mgr", socfpga_clkmgr_init);
324 44
325static void __init socfpga_periph_init(struct device_node *node)
326{
327 socfpga_clk_init(node, &periclk_ops);
328}
329CLK_OF_DECLARE(socfpga_periph, "altr,socfpga-perip-clk", socfpga_periph_init);
330
331static void __init socfpga_gate_init(struct device_node *node)
332{
333 socfpga_gate_clk_init(node, &gateclk_ops);
334}
335CLK_OF_DECLARE(socfpga_gate, "altr,socfpga-gate-clk", socfpga_gate_init);
336
337void __init socfpga_init_clocks(void)
338{
339 struct clk *clk;
340 int ret;
341
342 clk = clk_register_fixed_factor(NULL, "smp_twd", "mpuclk", 0, 1, 4);
343 ret = clk_register_clkdev(clk, NULL, "smp_twd");
344 if (ret)
345 pr_err("smp_twd alias not registered\n");
346}
diff --git a/drivers/clk/socfpga/clk.h b/drivers/clk/socfpga/clk.h
new file mode 100644
index 000000000000..d2e54019c94f
--- /dev/null
+++ b/drivers/clk/socfpga/clk.h
@@ -0,0 +1,57 @@
1/*
2 * Copyright (c) 2013, Steffen Trumtrar <s.trumtrar@pengutronix.de>
3 *
4 * based on drivers/clk/tegra/clk.h
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#ifndef __SOCFPGA_CLK_H
18#define __SOCFPGA_CLK_H
19
20#include <linux/clk-provider.h>
21#include <linux/clkdev.h>
22
23/* Clock Manager offsets */
24#define CLKMGR_CTRL 0x0
25#define CLKMGR_BYPASS 0x4
26#define CLKMGR_L4SRC 0x70
27#define CLKMGR_PERPLL_SRC 0xAC
28
29#define SOCFPGA_MAX_PARENTS 3
30
31extern void __iomem *clk_mgr_base_addr;
32
33void __init socfpga_pll_init(struct device_node *node);
34void __init socfpga_periph_init(struct device_node *node);
35void __init socfpga_gate_init(struct device_node *node);
36
37struct socfpga_pll {
38 struct clk_gate hw;
39};
40
41struct socfpga_gate_clk {
42 struct clk_gate hw;
43 char *parent_name;
44 u32 fixed_div;
45 void __iomem *div_reg;
46 u32 width; /* only valid if div_reg != 0 */
47 u32 shift; /* only valid if div_reg != 0 */
48 u32 clk_phase[2];
49};
50
51struct socfpga_periph_clk {
52 struct clk_gate hw;
53 char *parent_name;
54 u32 fixed_div;
55};
56
57#endif /* SOCFPGA_CLK_H */
diff --git a/drivers/clk/st/Makefile b/drivers/clk/st/Makefile
new file mode 100644
index 000000000000..c7455ffdbdf7
--- /dev/null
+++ b/drivers/clk/st/Makefile
@@ -0,0 +1 @@
obj-y += clkgen-mux.o clkgen-pll.o clkgen-fsyn.o
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
new file mode 100644
index 000000000000..4f53ee0778d9
--- /dev/null
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -0,0 +1,1039 @@
1/*
2 * Copyright (C) 2014 STMicroelectronics R&D Ltd
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 */
9
10/*
11 * Authors:
12 * Stephen Gallimore <stephen.gallimore@st.com>,
13 * Pankaj Dev <pankaj.dev@st.com>.
14 */
15
16#include <linux/slab.h>
17#include <linux/of_address.h>
18#include <linux/clk-provider.h>
19
20#include "clkgen.h"
21
22/*
 23 * Maximum input clock to the PLL before we divide it down by 2,
 24 * although in practice this divider has never been seen in use on
 25 * real systems.
26 */
27#define QUADFS_NDIV_THRESHOLD 30000000
28
29#define PLL_BW_GOODREF (0L)
30#define PLL_BW_VBADREF (1L)
31#define PLL_BW_BADREF (2L)
32#define PLL_BW_VGOODREF (3L)
33
34#define QUADFS_MAX_CHAN 4
35
36struct stm_fs {
37 unsigned long ndiv;
38 unsigned long mdiv;
39 unsigned long pe;
40 unsigned long sdiv;
41 unsigned long nsdiv;
42};
43
44static struct stm_fs fs216c65_rtbl[] = {
 45 { .mdiv = 0x1f, .pe = 0x0, .sdiv = 0x7, .nsdiv = 0 }, /* 312.5 kHz */
46 { .mdiv = 0x17, .pe = 0x25ed, .sdiv = 0x1, .nsdiv = 0 }, /* 27 MHz */
47 { .mdiv = 0x1a, .pe = 0x7b36, .sdiv = 0x2, .nsdiv = 1 }, /* 36.87 MHz */
48 { .mdiv = 0x13, .pe = 0x0, .sdiv = 0x2, .nsdiv = 1 }, /* 48 MHz */
49 { .mdiv = 0x11, .pe = 0x1c72, .sdiv = 0x1, .nsdiv = 1 }, /* 108 MHz */
50};
51
52static struct stm_fs fs432c65_rtbl[] = {
 53 { .mdiv = 0x1f, .pe = 0x0, .sdiv = 0x7, .nsdiv = 0 }, /* 625 kHz */
54 { .mdiv = 0x11, .pe = 0x1c72, .sdiv = 0x2, .nsdiv = 1 }, /* 108 MHz */
55 { .mdiv = 0x19, .pe = 0x121a, .sdiv = 0x0, .nsdiv = 1 }, /* 297 MHz */
56};
57
58static struct stm_fs fs660c32_rtbl[] = {
 59 { .mdiv = 0x01, .pe = 0x2aaa, .sdiv = 0x8, .nsdiv = 0 }, /* 600 kHz */
 60 { .mdiv = 0x02, .pe = 0x3d33, .sdiv = 0x0, .nsdiv = 0 }, /* 148.5 MHz */
 61 { .mdiv = 0x13, .pe = 0x5bcc, .sdiv = 0x0, .nsdiv = 1 }, /* 297 MHz */
 62 { .mdiv = 0x0e, .pe = 0x1025, .sdiv = 0x0, .nsdiv = 1 }, /* 333 MHz */
 63 { .mdiv = 0x0b, .pe = 0x715f, .sdiv = 0x0, .nsdiv = 1 }, /* 350 MHz */
64};
65
66struct clkgen_quadfs_data {
67 bool reset_present;
68 bool bwfilter_present;
69 bool lockstatus_present;
70 bool nsdiv_present;
71 struct clkgen_field ndiv;
72 struct clkgen_field ref_bw;
73 struct clkgen_field nreset;
74 struct clkgen_field npda;
75 struct clkgen_field lock_status;
76
77 struct clkgen_field nsb[QUADFS_MAX_CHAN];
78 struct clkgen_field en[QUADFS_MAX_CHAN];
79 struct clkgen_field mdiv[QUADFS_MAX_CHAN];
80 struct clkgen_field pe[QUADFS_MAX_CHAN];
81 struct clkgen_field sdiv[QUADFS_MAX_CHAN];
82 struct clkgen_field nsdiv[QUADFS_MAX_CHAN];
83
84 const struct clk_ops *pll_ops;
85 struct stm_fs *rtbl;
86 u8 rtbl_cnt;
87 int (*get_rate)(unsigned long , struct stm_fs *,
88 unsigned long *);
89};
90
91static const struct clk_ops st_quadfs_pll_c65_ops;
92static const struct clk_ops st_quadfs_pll_c32_ops;
93static const struct clk_ops st_quadfs_fs216c65_ops;
94static const struct clk_ops st_quadfs_fs432c65_ops;
95static const struct clk_ops st_quadfs_fs660c32_ops;
96
97static int clk_fs216c65_get_rate(unsigned long, struct stm_fs *,
98 unsigned long *);
99static int clk_fs432c65_get_rate(unsigned long, struct stm_fs *,
100 unsigned long *);
101static int clk_fs660c32_dig_get_rate(unsigned long, struct stm_fs *,
102 unsigned long *);
103/*
104 * Values for all of the standalone instances of this clock
105 * generator found in STiH415 and STiH416 SYSCFG register banks. Note
106 * that the individual channel standby control bits (nsb) are in the
107 * first register along with the PLL control bits.
108 */
109static struct clkgen_quadfs_data st_fs216c65_416 = {
110 /* 416 specific */
111 .npda = CLKGEN_FIELD(0x0, 0x1, 14),
112 .nsb = { CLKGEN_FIELD(0x0, 0x1, 10),
113 CLKGEN_FIELD(0x0, 0x1, 11),
114 CLKGEN_FIELD(0x0, 0x1, 12),
115 CLKGEN_FIELD(0x0, 0x1, 13) },
116 .nsdiv_present = true,
117 .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18),
118 CLKGEN_FIELD(0x0, 0x1, 19),
119 CLKGEN_FIELD(0x0, 0x1, 20),
120 CLKGEN_FIELD(0x0, 0x1, 21) },
121 .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0),
122 CLKGEN_FIELD(0x14, 0x1f, 0),
123 CLKGEN_FIELD(0x24, 0x1f, 0),
124 CLKGEN_FIELD(0x34, 0x1f, 0) },
125 .en = { CLKGEN_FIELD(0x10, 0x1, 0),
126 CLKGEN_FIELD(0x20, 0x1, 0),
127 CLKGEN_FIELD(0x30, 0x1, 0),
128 CLKGEN_FIELD(0x40, 0x1, 0) },
129 .ndiv = CLKGEN_FIELD(0x0, 0x1, 15),
130 .bwfilter_present = true,
131 .ref_bw = CLKGEN_FIELD(0x0, 0x3, 16),
132 .pe = { CLKGEN_FIELD(0x8, 0xffff, 0),
133 CLKGEN_FIELD(0x18, 0xffff, 0),
134 CLKGEN_FIELD(0x28, 0xffff, 0),
135 CLKGEN_FIELD(0x38, 0xffff, 0) },
136 .sdiv = { CLKGEN_FIELD(0xC, 0x7, 0),
137 CLKGEN_FIELD(0x1C, 0x7, 0),
138 CLKGEN_FIELD(0x2C, 0x7, 0),
139 CLKGEN_FIELD(0x3C, 0x7, 0) },
140 .pll_ops = &st_quadfs_pll_c65_ops,
141 .rtbl = fs216c65_rtbl,
142 .rtbl_cnt = ARRAY_SIZE(fs216c65_rtbl),
143 .get_rate = clk_fs216c65_get_rate,
144};
145
146static struct clkgen_quadfs_data st_fs432c65_416 = {
147 .npda = CLKGEN_FIELD(0x0, 0x1, 14),
148 .nsb = { CLKGEN_FIELD(0x0, 0x1, 10),
149 CLKGEN_FIELD(0x0, 0x1, 11),
150 CLKGEN_FIELD(0x0, 0x1, 12),
151 CLKGEN_FIELD(0x0, 0x1, 13) },
152 .nsdiv_present = true,
153 .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18),
154 CLKGEN_FIELD(0x0, 0x1, 19),
155 CLKGEN_FIELD(0x0, 0x1, 20),
156 CLKGEN_FIELD(0x0, 0x1, 21) },
157 .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0),
158 CLKGEN_FIELD(0x14, 0x1f, 0),
159 CLKGEN_FIELD(0x24, 0x1f, 0),
160 CLKGEN_FIELD(0x34, 0x1f, 0) },
161 .en = { CLKGEN_FIELD(0x10, 0x1, 0),
162 CLKGEN_FIELD(0x20, 0x1, 0),
163 CLKGEN_FIELD(0x30, 0x1, 0),
164 CLKGEN_FIELD(0x40, 0x1, 0) },
165 .ndiv = CLKGEN_FIELD(0x0, 0x1, 15),
166 .bwfilter_present = true,
167 .ref_bw = CLKGEN_FIELD(0x0, 0x3, 16),
168 .pe = { CLKGEN_FIELD(0x8, 0xffff, 0),
169 CLKGEN_FIELD(0x18, 0xffff, 0),
170 CLKGEN_FIELD(0x28, 0xffff, 0),
171 CLKGEN_FIELD(0x38, 0xffff, 0) },
172 .sdiv = { CLKGEN_FIELD(0xC, 0x7, 0),
173 CLKGEN_FIELD(0x1C, 0x7, 0),
174 CLKGEN_FIELD(0x2C, 0x7, 0),
175 CLKGEN_FIELD(0x3C, 0x7, 0) },
176 .pll_ops = &st_quadfs_pll_c65_ops,
177 .rtbl = fs432c65_rtbl,
178 .rtbl_cnt = ARRAY_SIZE(fs432c65_rtbl),
179 .get_rate = clk_fs432c65_get_rate,
180};
181
182static struct clkgen_quadfs_data st_fs660c32_E_416 = {
183 .npda = CLKGEN_FIELD(0x0, 0x1, 14),
184 .nsb = { CLKGEN_FIELD(0x0, 0x1, 10),
185 CLKGEN_FIELD(0x0, 0x1, 11),
186 CLKGEN_FIELD(0x0, 0x1, 12),
187 CLKGEN_FIELD(0x0, 0x1, 13) },
188 .nsdiv_present = true,
189 .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18),
190 CLKGEN_FIELD(0x0, 0x1, 19),
191 CLKGEN_FIELD(0x0, 0x1, 20),
192 CLKGEN_FIELD(0x0, 0x1, 21) },
193 .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0),
194 CLKGEN_FIELD(0x14, 0x1f, 0),
195 CLKGEN_FIELD(0x24, 0x1f, 0),
196 CLKGEN_FIELD(0x34, 0x1f, 0) },
197 .en = { CLKGEN_FIELD(0x10, 0x1, 0),
198 CLKGEN_FIELD(0x20, 0x1, 0),
199 CLKGEN_FIELD(0x30, 0x1, 0),
200 CLKGEN_FIELD(0x40, 0x1, 0) },
201 .ndiv = CLKGEN_FIELD(0x0, 0x7, 15),
202 .pe = { CLKGEN_FIELD(0x8, 0x7fff, 0),
203 CLKGEN_FIELD(0x18, 0x7fff, 0),
204 CLKGEN_FIELD(0x28, 0x7fff, 0),
205 CLKGEN_FIELD(0x38, 0x7fff, 0) },
206 .sdiv = { CLKGEN_FIELD(0xC, 0xf, 0),
207 CLKGEN_FIELD(0x1C, 0xf, 0),
208 CLKGEN_FIELD(0x2C, 0xf, 0),
209 CLKGEN_FIELD(0x3C, 0xf, 0) },
210 .lockstatus_present = true,
211 .lock_status = CLKGEN_FIELD(0xAC, 0x1, 0),
212 .pll_ops = &st_quadfs_pll_c32_ops,
213 .rtbl = fs660c32_rtbl,
214 .rtbl_cnt = ARRAY_SIZE(fs660c32_rtbl),
215 .get_rate = clk_fs660c32_dig_get_rate,
216};
217
218static struct clkgen_quadfs_data st_fs660c32_F_416 = {
219 .npda = CLKGEN_FIELD(0x0, 0x1, 14),
220 .nsb = { CLKGEN_FIELD(0x0, 0x1, 10),
221 CLKGEN_FIELD(0x0, 0x1, 11),
222 CLKGEN_FIELD(0x0, 0x1, 12),
223 CLKGEN_FIELD(0x0, 0x1, 13) },
224 .nsdiv_present = true,
225 .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18),
226 CLKGEN_FIELD(0x0, 0x1, 19),
227 CLKGEN_FIELD(0x0, 0x1, 20),
228 CLKGEN_FIELD(0x0, 0x1, 21) },
229 .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0),
230 CLKGEN_FIELD(0x14, 0x1f, 0),
231 CLKGEN_FIELD(0x24, 0x1f, 0),
232 CLKGEN_FIELD(0x34, 0x1f, 0) },
233 .en = { CLKGEN_FIELD(0x10, 0x1, 0),
234 CLKGEN_FIELD(0x20, 0x1, 0),
235 CLKGEN_FIELD(0x30, 0x1, 0),
236 CLKGEN_FIELD(0x40, 0x1, 0) },
237 .ndiv = CLKGEN_FIELD(0x0, 0x7, 15),
238 .pe = { CLKGEN_FIELD(0x8, 0x7fff, 0),
239 CLKGEN_FIELD(0x18, 0x7fff, 0),
240 CLKGEN_FIELD(0x28, 0x7fff, 0),
241 CLKGEN_FIELD(0x38, 0x7fff, 0) },
242 .sdiv = { CLKGEN_FIELD(0xC, 0xf, 0),
243 CLKGEN_FIELD(0x1C, 0xf, 0),
244 CLKGEN_FIELD(0x2C, 0xf, 0),
245 CLKGEN_FIELD(0x3C, 0xf, 0) },
246 .lockstatus_present = true,
247 .lock_status = CLKGEN_FIELD(0xEC, 0x1, 0),
248 .pll_ops = &st_quadfs_pll_c32_ops,
249 .rtbl = fs660c32_rtbl,
250 .rtbl_cnt = ARRAY_SIZE(fs660c32_rtbl),
251 .get_rate = clk_fs660c32_dig_get_rate,
252};
253
254/**
 255 * DOC: A Frequency Synthesizer that multiplies its input clock by a fixed factor
256 *
257 * Traits of this clock:
258 * prepare - clk_(un)prepare only ensures parent is (un)prepared
259 * enable - clk_enable and clk_disable are functional & control the Fsyn
260 * rate - inherits rate from parent. set_rate/round_rate/recalc_rate
261 * parent - fixed parent. No clk_set_parent support
262 */
263
264/**
 265 * struct st_clk_quadfs_pll - A PLL which outputs a fixed multiple of
266 * its parent clock, found inside a type of
267 * ST quad channel frequency synthesizer block
268 *
269 * @hw: handle between common and hardware-specific interfaces.
270 * @ndiv: regmap field for the ndiv control.
271 * @regs_base: base address of the configuration registers.
272 * @lock: spinlock.
273 *
274 */
275struct st_clk_quadfs_pll {
276 struct clk_hw hw;
277 void __iomem *regs_base;
278 spinlock_t *lock;
279 struct clkgen_quadfs_data *data;
280 u32 ndiv;
281};
282
283#define to_quadfs_pll(_hw) container_of(_hw, struct st_clk_quadfs_pll, hw)
284
285static int quadfs_pll_enable(struct clk_hw *hw)
286{
287 struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
288 unsigned long flags = 0, timeout = jiffies + msecs_to_jiffies(10);
289
290 if (pll->lock)
291 spin_lock_irqsave(pll->lock, flags);
292
293 /*
294 * Bring block out of reset if we have reset control.
295 */
296 if (pll->data->reset_present)
297 CLKGEN_WRITE(pll, nreset, 1);
298
299 /*
300 * Use a fixed input clock noise bandwidth filter for the moment
301 */
302 if (pll->data->bwfilter_present)
303 CLKGEN_WRITE(pll, ref_bw, PLL_BW_GOODREF);
304
305
306 CLKGEN_WRITE(pll, ndiv, pll->ndiv);
307
308 /*
309 * Power up the PLL
310 */
311 CLKGEN_WRITE(pll, npda, 1);
312
313 if (pll->lock)
314 spin_unlock_irqrestore(pll->lock, flags);
315
316 if (pll->data->lockstatus_present)
317 while (!CLKGEN_READ(pll, lock_status)) {
318 if (time_after(jiffies, timeout))
319 return -ETIMEDOUT;
320 cpu_relax();
321 }
322
323 return 0;
324}
325
326static void quadfs_pll_disable(struct clk_hw *hw)
327{
328 struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
329 unsigned long flags = 0;
330
331 if (pll->lock)
332 spin_lock_irqsave(pll->lock, flags);
333
334 /*
335 * Powerdown the PLL and then put block into soft reset if we have
336 * reset control.
337 */
338 CLKGEN_WRITE(pll, npda, 0);
339
340 if (pll->data->reset_present)
341 CLKGEN_WRITE(pll, nreset, 0);
342
343 if (pll->lock)
344 spin_unlock_irqrestore(pll->lock, flags);
345}
346
347static int quadfs_pll_is_enabled(struct clk_hw *hw)
348{
349 struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
350 u32 npda = CLKGEN_READ(pll, npda);
351
352 return !!npda;
353}
354
355int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs,
356 unsigned long *rate)
357{
358 unsigned long nd = fs->ndiv + 16; /* ndiv value */
359
360 *rate = input * nd;
361
362 return 0;
363}
364
365static unsigned long quadfs_pll_fs660c32_recalc_rate(struct clk_hw *hw,
366 unsigned long parent_rate)
367{
368 struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
369 unsigned long rate = 0;
370 struct stm_fs params;
371
372 params.ndiv = CLKGEN_READ(pll, ndiv);
373 if (clk_fs660c32_vco_get_rate(parent_rate, &params, &rate))
374 pr_err("%s:%s error calculating rate\n",
375 __clk_get_name(hw->clk), __func__);
376
377 pll->ndiv = params.ndiv;
378
379 return rate;
380}
381
382int clk_fs660c32_vco_get_params(unsigned long input,
383 unsigned long output, struct stm_fs *fs)
384{
 385	/*
 386	 * Formula: VCO frequency = (fin x ndiv) / pdiv
 387	 *          ndiv = VCO frequency x pdiv / fin
 388	 */
389 unsigned long pdiv = 1, n;
390
 391	/* Output clock range: 384 MHz to 660 MHz */
392 if (output < 384000000 || output > 660000000)
393 return -EINVAL;
394
395 if (input > 40000000)
396 /* This means that PDIV would be 2 instead of 1.
397 Not supported today. */
398 return -EINVAL;
399
400 input /= 1000;
401 output /= 1000;
402
403 n = output * pdiv / input;
404 if (n < 16)
405 n = 16;
406 fs->ndiv = n - 16; /* Converting formula value to reg value */
407
408 return 0;
409}
410
411static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw, unsigned long rate
412 , unsigned long *prate)
413{
414 struct stm_fs params;
415
416 if (!clk_fs660c32_vco_get_params(*prate, rate, &params))
417 clk_fs660c32_vco_get_rate(*prate, &params, &rate);
418
419 pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n",
420 __func__, __clk_get_name(hw->clk),
421 rate, (unsigned int)params.sdiv,
422 (unsigned int)params.mdiv,
423 (unsigned int)params.pe, (unsigned int)params.nsdiv);
424
425 return rate;
426}
427
428static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate,
429 unsigned long parent_rate)
430{
431 struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
432 struct stm_fs params;
433 long hwrate = 0;
434 unsigned long flags = 0;
435
436 if (!rate || !parent_rate)
437 return -EINVAL;
438
439 if (!clk_fs660c32_vco_get_params(parent_rate, rate, &params))
440 clk_fs660c32_vco_get_rate(parent_rate, &params, &hwrate);
441
442 pr_debug("%s: %s new rate %ld [ndiv=0x%x]\n",
443 __func__, __clk_get_name(hw->clk),
444 hwrate, (unsigned int)params.ndiv);
445
446 if (!hwrate)
447 return -EINVAL;
448
449 pll->ndiv = params.ndiv;
450
451 if (pll->lock)
452 spin_lock_irqsave(pll->lock, flags);
453
454 CLKGEN_WRITE(pll, ndiv, pll->ndiv);
455
456 if (pll->lock)
457 spin_unlock_irqrestore(pll->lock, flags);
458
459 return 0;
460}
461
462static const struct clk_ops st_quadfs_pll_c65_ops = {
463 .enable = quadfs_pll_enable,
464 .disable = quadfs_pll_disable,
465 .is_enabled = quadfs_pll_is_enabled,
466};
467
468static const struct clk_ops st_quadfs_pll_c32_ops = {
469 .enable = quadfs_pll_enable,
470 .disable = quadfs_pll_disable,
471 .is_enabled = quadfs_pll_is_enabled,
472 .recalc_rate = quadfs_pll_fs660c32_recalc_rate,
473 .round_rate = quadfs_pll_fs660c32_round_rate,
474 .set_rate = quadfs_pll_fs660c32_set_rate,
475};
476
477static struct clk * __init st_clk_register_quadfs_pll(
478 const char *name, const char *parent_name,
479 struct clkgen_quadfs_data *quadfs, void __iomem *reg,
480 spinlock_t *lock)
481{
482 struct st_clk_quadfs_pll *pll;
483 struct clk *clk;
484 struct clk_init_data init;
485
486 /*
487 * Sanity check required pointers.
488 */
489 if (WARN_ON(!name || !parent_name))
490 return ERR_PTR(-EINVAL);
491
492 pll = kzalloc(sizeof(*pll), GFP_KERNEL);
493 if (!pll)
494 return ERR_PTR(-ENOMEM);
495
496 init.name = name;
497 init.ops = quadfs->pll_ops;
498 init.flags = CLK_IS_BASIC;
499 init.parent_names = &parent_name;
500 init.num_parents = 1;
501
502 pll->data = quadfs;
503 pll->regs_base = reg;
504 pll->lock = lock;
505 pll->hw.init = &init;
506
507 clk = clk_register(NULL, &pll->hw);
508
509 if (IS_ERR(clk))
510 kfree(pll);
511
512 return clk;
513}
514
515/**
516 * DOC: A digital frequency synthesizer
517 *
518 * Traits of this clock:
519 * prepare - clk_(un)prepare only ensures parent is (un)prepared
520 * enable - clk_enable and clk_disable are functional
521 * rate - set rate is functional
522 * parent - fixed parent. No clk_set_parent support
523 */
524
525/**
526 * struct st_clk_quadfs_fsynth - One clock output from a four channel digital
527 * frequency synthesizer (fsynth) block.
528 *
529 * @hw: handle between common and hardware-specific interfaces
530 *
531 * @nsb: regmap field in the output control register for the digital
532 * standby of this fsynth channel. This control is active low so
533 * the channel is in standby when the control bit is cleared.
534 *
535 * @nsdiv: regmap field in the output control register for
 536 * the optional divide by 3 of this fsynth channel. This control
537 * is active low so the divide by 3 is active when the control bit is
538 * cleared and the divide is bypassed when the bit is set.
539 */
540struct st_clk_quadfs_fsynth {
541 struct clk_hw hw;
542 void __iomem *regs_base;
543 spinlock_t *lock;
544 struct clkgen_quadfs_data *data;
545
546 u32 chan;
547 /*
548 * Cached hardware values from set_rate so we can program the
549 * hardware in enable. There are two reasons for this:
550 *
551 * 1. The registers may not be writable until the parent has been
552 * enabled.
553 *
554 * 2. It restores the clock rate when a driver does an enable
555 * on PM restore, after a suspend to RAM has lost the hardware
556 * setup.
557 */
558 u32 md;
559 u32 pe;
560 u32 sdiv;
561 u32 nsdiv;
562};
563
564#define to_quadfs_fsynth(_hw) \
565 container_of(_hw, struct st_clk_quadfs_fsynth, hw)
566
567static void quadfs_fsynth_program_enable(struct st_clk_quadfs_fsynth *fs)
568{
569 /*
570 * Pulse the program enable register lsb to make the hardware take
571 * notice of the new md/pe values with a glitchless transition.
572 */
573 CLKGEN_WRITE(fs, en[fs->chan], 1);
574 CLKGEN_WRITE(fs, en[fs->chan], 0);
575}
576
577static void quadfs_fsynth_program_rate(struct st_clk_quadfs_fsynth *fs)
578{
579 unsigned long flags = 0;
580
581 /*
582 * Ensure the md/pe parameters are ignored while we are
583 * reprogramming them so we can get a glitchless change
584 * when fine tuning the speed of a running clock.
585 */
586 CLKGEN_WRITE(fs, en[fs->chan], 0);
587
588 CLKGEN_WRITE(fs, mdiv[fs->chan], fs->md);
589 CLKGEN_WRITE(fs, pe[fs->chan], fs->pe);
590 CLKGEN_WRITE(fs, sdiv[fs->chan], fs->sdiv);
591
592 if (fs->lock)
593 spin_lock_irqsave(fs->lock, flags);
594
595 if (fs->data->nsdiv_present)
596 CLKGEN_WRITE(fs, nsdiv[fs->chan], fs->nsdiv);
597
598 if (fs->lock)
599 spin_unlock_irqrestore(fs->lock, flags);
600}
601
602static int quadfs_fsynth_enable(struct clk_hw *hw)
603{
604 struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
605 unsigned long flags = 0;
606
607 pr_debug("%s: %s\n", __func__, __clk_get_name(hw->clk));
608
609 quadfs_fsynth_program_rate(fs);
610
611 if (fs->lock)
612 spin_lock_irqsave(fs->lock, flags);
613
614 CLKGEN_WRITE(fs, nsb[fs->chan], 1);
615
616 if (fs->lock)
617 spin_unlock_irqrestore(fs->lock, flags);
618
619 quadfs_fsynth_program_enable(fs);
620
621 return 0;
622}
623
624static void quadfs_fsynth_disable(struct clk_hw *hw)
625{
626 struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
627 unsigned long flags = 0;
628
629 pr_debug("%s: %s\n", __func__, __clk_get_name(hw->clk));
630
631 if (fs->lock)
632 spin_lock_irqsave(fs->lock, flags);
633
634 CLKGEN_WRITE(fs, nsb[fs->chan], 0);
635
636 if (fs->lock)
637 spin_unlock_irqrestore(fs->lock, flags);
638}
639
640static int quadfs_fsynth_is_enabled(struct clk_hw *hw)
641{
642 struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
643 u32 nsb = CLKGEN_READ(fs, nsb[fs->chan]);
644
645 pr_debug("%s: %s enable bit = 0x%x\n",
646 __func__, __clk_get_name(hw->clk), nsb);
647
648 return !!nsb;
649}
650
651#define P15 (uint64_t)(1 << 15)
652
653static int clk_fs216c65_get_rate(unsigned long input, struct stm_fs *fs,
654 unsigned long *rate)
655{
656 uint64_t res;
657 unsigned long ns;
658 unsigned long nd = 8; /* ndiv stuck at 0 => val = 8 */
659 unsigned long s;
660 long m;
661
662 m = fs->mdiv - 32;
663 s = 1 << (fs->sdiv + 1);
664 ns = (fs->nsdiv ? 1 : 3);
665
666 res = (uint64_t)(s * ns * P15 * (uint64_t)(m + 33));
667 res = res - (s * ns * fs->pe);
668 *rate = div64_u64(P15 * nd * input * 32, res);
669
670 return 0;
671}
672
673static int clk_fs432c65_get_rate(unsigned long input, struct stm_fs *fs,
674 unsigned long *rate)
675{
676 uint64_t res;
 677	unsigned long nd = 16; /* ndiv value; stuck at 0 (30 MHz input) */
678 long m;
679 unsigned long sd;
680 unsigned long ns;
681
682 m = fs->mdiv - 32;
683 sd = 1 << (fs->sdiv + 1);
684 ns = (fs->nsdiv ? 1 : 3);
685
686 res = (uint64_t)(sd * ns * P15 * (uint64_t)(m + 33));
687 res = res - (sd * ns * fs->pe);
688 *rate = div64_u64(P15 * nd * input * 32, res);
689
690 return 0;
691}
692
693#define P20 (uint64_t)(1 << 20)
694
695static int clk_fs660c32_dig_get_rate(unsigned long input,
696 struct stm_fs *fs, unsigned long *rate)
697{
698 unsigned long s = (1 << fs->sdiv);
699 unsigned long ns;
700 uint64_t res;
701
702 /*
703 * 'nsdiv' is a register value ('BIN') which is translated
 704 * to a decimal value according to the following rules.
705 *
706 * nsdiv ns.dec
707 * 0 3
708 * 1 1
709 */
710 ns = (fs->nsdiv == 1) ? 1 : 3;
711
712 res = (P20 * (32 + fs->mdiv) + 32 * fs->pe) * s * ns;
713 *rate = (unsigned long)div64_u64(input * P20 * 32, res);
714
715 return 0;
716}
717
718static int quadfs_fsynt_get_hw_value_for_recalc(struct st_clk_quadfs_fsynth *fs,
719 struct stm_fs *params)
720{
721 /*
722 * Get the initial hardware values for recalc_rate
723 */
724 params->mdiv = CLKGEN_READ(fs, mdiv[fs->chan]);
725 params->pe = CLKGEN_READ(fs, pe[fs->chan]);
726 params->sdiv = CLKGEN_READ(fs, sdiv[fs->chan]);
727
728 if (fs->data->nsdiv_present)
729 params->nsdiv = CLKGEN_READ(fs, nsdiv[fs->chan]);
730 else
731 params->nsdiv = 1;
732
733 /*
 734 * If all of the fields read back as zero, assume no clock rate is programmed.
735 */
736 if (!params->mdiv && !params->pe && !params->sdiv)
737 return 1;
738
739 fs->md = params->mdiv;
740 fs->pe = params->pe;
741 fs->sdiv = params->sdiv;
742 fs->nsdiv = params->nsdiv;
743
744 return 0;
745}
746
747static long quadfs_find_best_rate(struct clk_hw *hw, unsigned long drate,
748 unsigned long prate, struct stm_fs *params)
749{
750 struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
751 int (*clk_fs_get_rate)(unsigned long ,
752 struct stm_fs *, unsigned long *);
753 struct stm_fs prev_params;
754 unsigned long prev_rate, rate = 0;
755 unsigned long diff_rate, prev_diff_rate = ~0;
756 int index;
757
758 clk_fs_get_rate = fs->data->get_rate;
759
760 for (index = 0; index < fs->data->rtbl_cnt; index++) {
761 prev_rate = rate;
762
763 *params = fs->data->rtbl[index];
764 prev_params = *params;
765
766 clk_fs_get_rate(prate, &fs->data->rtbl[index], &rate);
767
768 diff_rate = abs(drate - rate);
769
770 if (diff_rate > prev_diff_rate) {
771 rate = prev_rate;
772 *params = prev_params;
773 break;
774 }
775
776 prev_diff_rate = diff_rate;
777
778 if (drate == rate)
779 return rate;
780 }
781
782
783 if (index == fs->data->rtbl_cnt)
784 *params = prev_params;
785
786 return rate;
787}
788
789static unsigned long quadfs_recalc_rate(struct clk_hw *hw,
790 unsigned long parent_rate)
791{
792 struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
793 unsigned long rate = 0;
794 struct stm_fs params;
795 int (*clk_fs_get_rate)(unsigned long ,
796 struct stm_fs *, unsigned long *);
797
798 clk_fs_get_rate = fs->data->get_rate;
799
800 if (quadfs_fsynt_get_hw_value_for_recalc(fs, &params))
801 return 0;
802
803 if (clk_fs_get_rate(parent_rate, &params, &rate)) {
804 pr_err("%s:%s error calculating rate\n",
805 __clk_get_name(hw->clk), __func__);
806 }
807
808 pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
809
810 return rate;
811}
812
813static long quadfs_round_rate(struct clk_hw *hw, unsigned long rate,
814 unsigned long *prate)
815{
816 struct stm_fs params;
817
818 rate = quadfs_find_best_rate(hw, rate, *prate, &params);
819
820 pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n",
821 __func__, __clk_get_name(hw->clk),
822 rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv,
823 (unsigned int)params.pe, (unsigned int)params.nsdiv);
824
825 return rate;
826}
827
828
829static void quadfs_program_and_enable(struct st_clk_quadfs_fsynth *fs,
830 struct stm_fs *params)
831{
832 fs->md = params->mdiv;
833 fs->pe = params->pe;
834 fs->sdiv = params->sdiv;
835 fs->nsdiv = params->nsdiv;
836
837 /*
838 * In some integrations you can only change the fsynth programming when
839 * the parent entity containing it is enabled.
840 */
841 quadfs_fsynth_program_rate(fs);
842 quadfs_fsynth_program_enable(fs);
843}
844
845static int quadfs_set_rate(struct clk_hw *hw, unsigned long rate,
846 unsigned long parent_rate)
847{
848 struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
849 struct stm_fs params;
850 long hwrate;
851 int uninitialized_var(i);
852
853 if (!rate || !parent_rate)
854 return -EINVAL;
855
856 memset(&params, 0, sizeof(struct stm_fs));
857
858 hwrate = quadfs_find_best_rate(hw, rate, parent_rate, &params);
859 if (!hwrate)
860 return -EINVAL;
861
862 quadfs_program_and_enable(fs, &params);
863
864 return 0;
865}
866
867
868
869static const struct clk_ops st_quadfs_ops = {
870 .enable = quadfs_fsynth_enable,
871 .disable = quadfs_fsynth_disable,
872 .is_enabled = quadfs_fsynth_is_enabled,
873 .round_rate = quadfs_round_rate,
874 .set_rate = quadfs_set_rate,
875 .recalc_rate = quadfs_recalc_rate,
876};
877
878static struct clk * __init st_clk_register_quadfs_fsynth(
879 const char *name, const char *parent_name,
880 struct clkgen_quadfs_data *quadfs, void __iomem *reg, u32 chan,
881 spinlock_t *lock)
882{
883 struct st_clk_quadfs_fsynth *fs;
884 struct clk *clk;
885 struct clk_init_data init;
886
887 /*
888 * Sanity check required pointers, note that nsdiv3 is optional.
889 */
890 if (WARN_ON(!name || !parent_name))
891 return ERR_PTR(-EINVAL);
892
893 fs = kzalloc(sizeof(*fs), GFP_KERNEL);
894 if (!fs)
895 return ERR_PTR(-ENOMEM);
896
897 init.name = name;
898 init.ops = &st_quadfs_ops;
899 init.flags = CLK_GET_RATE_NOCACHE | CLK_IS_BASIC;
900 init.parent_names = &parent_name;
901 init.num_parents = 1;
902
903 fs->data = quadfs;
904 fs->regs_base = reg;
905 fs->chan = chan;
906 fs->lock = lock;
907 fs->hw.init = &init;
908
909 clk = clk_register(NULL, &fs->hw);
910
911 if (IS_ERR(clk))
912 kfree(fs);
913
914 return clk;
915}
916
917static struct of_device_id quadfs_of_match[] = {
918 {
919 .compatible = "st,stih416-quadfs216",
920 .data = (void *)&st_fs216c65_416
921 },
922 {
923 .compatible = "st,stih416-quadfs432",
924 .data = (void *)&st_fs432c65_416
925 },
926 {
927 .compatible = "st,stih416-quadfs660-E",
928 .data = (void *)&st_fs660c32_E_416
929 },
930 {
931 .compatible = "st,stih416-quadfs660-F",
932 .data = (void *)&st_fs660c32_F_416
933 },
934 {}
935};
936
937static void __init st_of_create_quadfs_fsynths(
938 struct device_node *np, const char *pll_name,
939 struct clkgen_quadfs_data *quadfs, void __iomem *reg,
940 spinlock_t *lock)
941{
942 struct clk_onecell_data *clk_data;
943 int fschan;
944
945 clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
946 if (!clk_data)
947 return;
948
949 clk_data->clk_num = QUADFS_MAX_CHAN;
950 clk_data->clks = kzalloc(QUADFS_MAX_CHAN * sizeof(struct clk *),
951 GFP_KERNEL);
952
953 if (!clk_data->clks) {
954 kfree(clk_data);
955 return;
956 }
957
958 for (fschan = 0; fschan < QUADFS_MAX_CHAN; fschan++) {
959 struct clk *clk;
960 const char *clk_name;
961
962 if (of_property_read_string_index(np, "clock-output-names",
963 fschan, &clk_name)) {
964 break;
965 }
966
967 /*
968 * If we read an empty clock name then the channel is unused
969 */
970 if (*clk_name == '\0')
971 continue;
972
973 clk = st_clk_register_quadfs_fsynth(clk_name, pll_name,
974 quadfs, reg, fschan, lock);
975
976 /*
977 * If there was an error registering this clock output, clean
978 * up and move on to the next one.
979 */
980 if (!IS_ERR(clk)) {
981 clk_data->clks[fschan] = clk;
982 pr_debug("%s: parent %s rate %u\n",
983 __clk_get_name(clk),
984 __clk_get_name(clk_get_parent(clk)),
985 (unsigned int)clk_get_rate(clk));
986 }
987 }
988
989 of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
990}
991
992static void __init st_of_quadfs_setup(struct device_node *np)
993{
994 const struct of_device_id *match;
995 struct clk *clk;
996 const char *pll_name, *clk_parent_name;
997 void __iomem *reg;
998 spinlock_t *lock;
999
1000 match = of_match_node(quadfs_of_match, np);
1001 if (WARN_ON(!match))
1002 return;
1003
1004 reg = of_iomap(np, 0);
1005 if (!reg)
1006 return;
1007
1008 clk_parent_name = of_clk_get_parent_name(np, 0);
1009 if (!clk_parent_name)
1010 return;
1011
1012 pll_name = kasprintf(GFP_KERNEL, "%s.pll", np->name);
1013 if (!pll_name)
1014 return;
1015
1016 lock = kzalloc(sizeof(*lock), GFP_KERNEL);
1017 if (!lock)
1018 goto err_exit;
1019
1020 spin_lock_init(lock);
1021
1022 clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name,
1023 (struct clkgen_quadfs_data *) match->data, reg, lock);
1024 if (IS_ERR(clk))
1025 goto err_exit;
1026 else
1027 pr_debug("%s: parent %s rate %u\n",
1028 __clk_get_name(clk),
1029 __clk_get_name(clk_get_parent(clk)),
1030 (unsigned int)clk_get_rate(clk));
1031
1032 st_of_create_quadfs_fsynths(np, pll_name,
1033 (struct clkgen_quadfs_data *)match->data,
1034 reg, lock);
1035
1036err_exit:
1037 kfree(pll_name); /* No longer need local copy of the PLL name */
1038}
1039CLK_OF_DECLARE(quadfs, "st,quadfs", st_of_quadfs_setup);
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
new file mode 100644
index 000000000000..a329906d1e81
--- /dev/null
+++ b/drivers/clk/st/clkgen-mux.c
@@ -0,0 +1,820 @@
1/*
2 * clkgen-mux.c: ST GEN-MUX Clock driver
3 *
4 * Copyright (C) 2014 STMicroelectronics (R&D) Limited
5 *
6 * Authors: Stephen Gallimore <stephen.gallimore@st.com>
7 * Pankaj Dev <pankaj.dev@st.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 */
15
16#include <linux/slab.h>
17#include <linux/of_address.h>
18#include <linux/clk-provider.h>
19
20static DEFINE_SPINLOCK(clkgena_divmux_lock);
21static DEFINE_SPINLOCK(clkgenf_lock);
22
23static const char ** __init clkgen_mux_get_parents(struct device_node *np,
24 int *num_parents)
25{
26 const char **parents;
27 int nparents, i;
28
29 nparents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
30 if (WARN_ON(nparents <= 0))
31 return ERR_PTR(-EINVAL);
32
33 parents = kzalloc(nparents * sizeof(const char *), GFP_KERNEL);
34 if (!parents)
35 return ERR_PTR(-ENOMEM);
36
37 for (i = 0; i < nparents; i++)
38 parents[i] = of_clk_get_parent_name(np, i);
39
40 *num_parents = nparents;
41 return parents;
42}
43
44/**
45 * DOC: Clock mux with a programmable divider on each of its three inputs.
46 * The mux has an input setting which effectively gates its output.
47 *
48 * Traits of this clock:
49 * prepare - clk_(un)prepare only ensures parent is (un)prepared
50 * enable - clk_enable and clk_disable are functional & control gating
51 * rate - set rate is supported
52 * parent - set/get parent
53 */
54
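The divmux below subclasses the generic mux and dividers rather than reimplementing them. A minimal sketch of that delegation pattern, using an illustrative wrapper type that is not part of this driver:

#include <linux/clk-provider.h>
#include <linux/kernel.h>

/* Illustrative wrapper: borrow the outer clk and reuse the stock mux ops. */
struct wrapped_mux {
        struct clk_hw hw;       /* clock exposed to the framework */
        struct clk_mux mux;     /* generic mux we delegate to     */
};

static u8 wrapped_mux_get_parent(struct clk_hw *hw)
{
        struct wrapped_mux *w = container_of(hw, struct wrapped_mux, hw);

        w->mux.hw.clk = hw->clk;        /* point the embedded hw at our clk */
        return clk_mux_ops.get_parent(&w->mux.hw);
}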
55#define NUM_INPUTS 3
56
57struct clkgena_divmux {
58 struct clk_hw hw;
59 /* Subclassed mux and divider structures */
60 struct clk_mux mux;
61 struct clk_divider div[NUM_INPUTS];
62 /* Enable/running feedback register bits for each input */
63 void __iomem *feedback_reg[NUM_INPUTS];
64 int feedback_bit_idx;
65
66 u8 muxsel;
67};
68
69#define to_clkgena_divmux(_hw) container_of(_hw, struct clkgena_divmux, hw)
70
71struct clkgena_divmux_data {
72 int num_outputs;
73 int mux_offset;
74 int mux_offset2;
75 int mux_start_bit;
76 int div_offsets[NUM_INPUTS];
77 int fb_offsets[NUM_INPUTS];
78 int fb_start_bit_idx;
79};
80
81#define CKGAX_CLKOPSRC_SWITCH_OFF 0x3
82
83static int clkgena_divmux_is_running(struct clkgena_divmux *mux)
84{
85 u32 regval = readl(mux->feedback_reg[mux->muxsel]);
86 u32 running = regval & BIT(mux->feedback_bit_idx);
87 return !!running;
88}
89
90static int clkgena_divmux_enable(struct clk_hw *hw)
91{
92 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
93 struct clk_hw *mux_hw = &genamux->mux.hw;
94 unsigned long timeout;
95 int ret = 0;
96
97 mux_hw->clk = hw->clk;
98
99 ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel);
100 if (ret)
101 return ret;
102
103 timeout = jiffies + msecs_to_jiffies(10);
104
105 while (!clkgena_divmux_is_running(genamux)) {
106 if (time_after(jiffies, timeout))
107 return -ETIMEDOUT;
108 cpu_relax();
109 }
110
111 return 0;
112}
113
114static void clkgena_divmux_disable(struct clk_hw *hw)
115{
116 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
117 struct clk_hw *mux_hw = &genamux->mux.hw;
118
119 mux_hw->clk = hw->clk;
120
121 clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF);
122}
123
124static int clkgena_divmux_is_enabled(struct clk_hw *hw)
125{
126 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
127 struct clk_hw *mux_hw = &genamux->mux.hw;
128
129 mux_hw->clk = hw->clk;
130
131 return (s8)clk_mux_ops.get_parent(mux_hw) > 0;
132}
133
134u8 clkgena_divmux_get_parent(struct clk_hw *hw)
135{
136 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
137 struct clk_hw *mux_hw = &genamux->mux.hw;
138
139 mux_hw->clk = hw->clk;
140
141 genamux->muxsel = clk_mux_ops.get_parent(mux_hw);
142 if ((s8)genamux->muxsel < 0) {
143 pr_debug("%s: %s: Invalid parent, setting to default.\n",
144 __func__, __clk_get_name(hw->clk));
145 genamux->muxsel = 0;
146 }
147
148 return genamux->muxsel;
149}
150
151static int clkgena_divmux_set_parent(struct clk_hw *hw, u8 index)
152{
153 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
154
155 if (index >= CKGAX_CLKOPSRC_SWITCH_OFF)
156 return -EINVAL;
157
158 genamux->muxsel = index;
159
160 /*
161 * If the mux is already enabled, call enable directly to set the
162 * new mux position and wait for it to start running again. Otherwise
163 * do nothing.
164 */
165 if (clkgena_divmux_is_enabled(hw))
166 clkgena_divmux_enable(hw);
167
168 return 0;
169}
170
171unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw,
172 unsigned long parent_rate)
173{
174 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
175 struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
176
177 div_hw->clk = hw->clk;
178
179 return clk_divider_ops.recalc_rate(div_hw, parent_rate);
180}
181
182static int clkgena_divmux_set_rate(struct clk_hw *hw, unsigned long rate,
183 unsigned long parent_rate)
184{
185 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
186 struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
187
188 div_hw->clk = hw->clk;
189
190 return clk_divider_ops.set_rate(div_hw, rate, parent_rate);
191}
192
193static long clkgena_divmux_round_rate(struct clk_hw *hw, unsigned long rate,
194 unsigned long *prate)
195{
196 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
197 struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
198
199 div_hw->clk = hw->clk;
200
201 return clk_divider_ops.round_rate(div_hw, rate, prate);
202}
203
204static const struct clk_ops clkgena_divmux_ops = {
205 .enable = clkgena_divmux_enable,
206 .disable = clkgena_divmux_disable,
207 .is_enabled = clkgena_divmux_is_enabled,
208 .get_parent = clkgena_divmux_get_parent,
209 .set_parent = clkgena_divmux_set_parent,
210 .round_rate = clkgena_divmux_round_rate,
211 .recalc_rate = clkgena_divmux_recalc_rate,
212 .set_rate = clkgena_divmux_set_rate,
213};
214
215/**
216 * clk_register_genamux - register a genamux clock with the clock framework
217 */
218struct clk *clk_register_genamux(const char *name,
219 const char **parent_names, u8 num_parents,
220 void __iomem *reg,
221 const struct clkgena_divmux_data *muxdata,
222 u32 idx)
223{
224 /*
225 * Fixed constants across all ClockgenA variants
226 */
227 const int mux_width = 2;
228 const int divider_width = 5;
229 struct clkgena_divmux *genamux;
230 struct clk *clk;
231 struct clk_init_data init;
232 int i;
233
234 genamux = kzalloc(sizeof(*genamux), GFP_KERNEL);
235 if (!genamux)
236 return ERR_PTR(-ENOMEM);
237
238 init.name = name;
239 init.ops = &clkgena_divmux_ops;
240 init.flags = CLK_IS_BASIC;
241 init.parent_names = parent_names;
242 init.num_parents = num_parents;
243
244 genamux->mux.lock = &clkgena_divmux_lock;
245 genamux->mux.mask = BIT(mux_width) - 1;
246 genamux->mux.shift = muxdata->mux_start_bit + (idx * mux_width);
247 if (genamux->mux.shift > 31) {
248 /*
249 * We have spilled into the second mux register so
250 * adjust the register address and the bit shift accordingly
251 */
252 genamux->mux.reg = reg + muxdata->mux_offset2;
253 genamux->mux.shift -= 32;
254 } else {
255 genamux->mux.reg = reg + muxdata->mux_offset;
256 }
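	/*
	 * Worked example (values from st_divmux_c65ls below): with
	 * mux_start_bit = 8 and mux_width = 2, output idx 12 gives
	 * shift = 8 + 24 = 32, which spills into the register at
	 * mux_offset2 (0x24) with the shift rebased to 0.
	 */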
257
258 for (i = 0; i < NUM_INPUTS; i++) {
259 /*
260 * Divider config for each input
261 */
262 void __iomem *divbase = reg + muxdata->div_offsets[i];
263 genamux->div[i].width = divider_width;
264 genamux->div[i].reg = divbase + (idx * sizeof(u32));
265
266 /*
267 * Mux enabled/running feedback register for each input.
268 */
269 genamux->feedback_reg[i] = reg + muxdata->fb_offsets[i];
270 }
271
272 genamux->feedback_bit_idx = muxdata->fb_start_bit_idx + idx;
273 genamux->hw.init = &init;
274
275 clk = clk_register(NULL, &genamux->hw);
276 if (IS_ERR(clk)) {
277 kfree(genamux);
278 goto err;
279 }
280
281 pr_debug("%s: parent %s rate %lu\n",
282 __clk_get_name(clk),
283 __clk_get_name(clk_get_parent(clk)),
284 clk_get_rate(clk));
285err:
286 return clk;
287}
288
289static struct clkgena_divmux_data st_divmux_c65hs = {
290 .num_outputs = 4,
291 .mux_offset = 0x14,
292 .mux_start_bit = 0,
293 .div_offsets = { 0x800, 0x900, 0xb00 },
294 .fb_offsets = { 0x18, 0x1c, 0x20 },
295 .fb_start_bit_idx = 0,
296};
297
298static struct clkgena_divmux_data st_divmux_c65ls = {
299 .num_outputs = 14,
300 .mux_offset = 0x14,
301 .mux_offset2 = 0x24,
302 .mux_start_bit = 8,
303 .div_offsets = { 0x810, 0xa10, 0xb10 },
304 .fb_offsets = { 0x18, 0x1c, 0x20 },
305 .fb_start_bit_idx = 4,
306};
307
308static struct clkgena_divmux_data st_divmux_c32odf0 = {
309 .num_outputs = 8,
310 .mux_offset = 0x1c,
311 .mux_start_bit = 0,
312 .div_offsets = { 0x800, 0x900, 0xa60 },
313 .fb_offsets = { 0x2c, 0x24, 0x28 },
314 .fb_start_bit_idx = 0,
315};
316
317static struct clkgena_divmux_data st_divmux_c32odf1 = {
318 .num_outputs = 8,
319 .mux_offset = 0x1c,
320 .mux_start_bit = 16,
321 .div_offsets = { 0x820, 0x980, 0xa80 },
322 .fb_offsets = { 0x2c, 0x24, 0x28 },
323 .fb_start_bit_idx = 8,
324};
325
326static struct clkgena_divmux_data st_divmux_c32odf2 = {
327 .num_outputs = 8,
328 .mux_offset = 0x20,
329 .mux_start_bit = 0,
330 .div_offsets = { 0x840, 0xa20, 0xb10 },
331 .fb_offsets = { 0x2c, 0x24, 0x28 },
332 .fb_start_bit_idx = 16,
333};
334
335static struct clkgena_divmux_data st_divmux_c32odf3 = {
336 .num_outputs = 8,
337 .mux_offset = 0x20,
338 .mux_start_bit = 16,
339 .div_offsets = { 0x860, 0xa40, 0xb30 },
340 .fb_offsets = { 0x2c, 0x24, 0x28 },
341 .fb_start_bit_idx = 24,
342};
343
344static struct of_device_id clkgena_divmux_of_match[] = {
345 {
346 .compatible = "st,clkgena-divmux-c65-hs",
347 .data = &st_divmux_c65hs,
348 },
349 {
350 .compatible = "st,clkgena-divmux-c65-ls",
351 .data = &st_divmux_c65ls,
352 },
353 {
354 .compatible = "st,clkgena-divmux-c32-odf0",
355 .data = &st_divmux_c32odf0,
356 },
357 {
358 .compatible = "st,clkgena-divmux-c32-odf1",
359 .data = &st_divmux_c32odf1,
360 },
361 {
362 .compatible = "st,clkgena-divmux-c32-odf2",
363 .data = &st_divmux_c32odf2,
364 },
365 {
366 .compatible = "st,clkgena-divmux-c32-odf3",
367 .data = &st_divmux_c32odf3,
368 },
369 {}
370};
371
372static void __iomem * __init clkgen_get_register_base(
373 struct device_node *np)
374{
375 struct device_node *pnode;
376 void __iomem *reg = NULL;
377
378 pnode = of_get_parent(np);
379 if (!pnode)
380 return NULL;
381
382 reg = of_iomap(pnode, 0);
383
384 of_node_put(pnode);
385 return reg;
386}
387
388void __init st_of_clkgena_divmux_setup(struct device_node *np)
389{
390 const struct of_device_id *match;
391 const struct clkgena_divmux_data *data;
392 struct clk_onecell_data *clk_data;
393 void __iomem *reg;
394 const char **parents;
395 int num_parents = 0, i;
396
397 match = of_match_node(clkgena_divmux_of_match, np);
398 if (WARN_ON(!match))
399 return;
400
401 data = (struct clkgena_divmux_data *)match->data;
402
403 reg = clkgen_get_register_base(np);
404 if (!reg)
405 return;
406
407 parents = clkgen_mux_get_parents(np, &num_parents);
408 if (IS_ERR(parents))
409 return;
410
411 clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
412 if (!clk_data)
413 goto err;
414
415 clk_data->clk_num = data->num_outputs;
416 clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *),
417 GFP_KERNEL);
418
419 if (!clk_data->clks)
420 goto err;
421
422 for (i = 0; i < clk_data->clk_num; i++) {
423 struct clk *clk;
424 const char *clk_name;
425
426 if (of_property_read_string_index(np, "clock-output-names",
427 i, &clk_name))
428 break;
429
430 /*
431 * If we read an empty clock name then the output is unused
432 */
433 if (*clk_name == '\0')
434 continue;
435
436 clk = clk_register_genamux(clk_name, parents, num_parents,
437 reg, data, i);
438
439 if (IS_ERR(clk))
440 goto err;
441
442 clk_data->clks[i] = clk;
443 }
444
445 kfree(parents);
446
447 of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
448 return;
449err:
450 if (clk_data)
451 kfree(clk_data->clks);
452
453 kfree(clk_data);
454 kfree(parents);
455}
456CLK_OF_DECLARE(clkgenadivmux, "st,clkgena-divmux", st_of_clkgena_divmux_setup);
457
458struct clkgena_prediv_data {
459 u32 offset;
460 u8 shift;
461 struct clk_div_table *table;
462};
463
464static struct clk_div_table prediv_table16[] = {
465 { .val = 0, .div = 1 },
466 { .val = 1, .div = 16 },
467 { .div = 0 },
468};
469
470static struct clkgena_prediv_data prediv_c65_data = {
471 .offset = 0x4c,
472 .shift = 31,
473 .table = prediv_table16,
474};
475
476static struct clkgena_prediv_data prediv_c32_data = {
477 .offset = 0x50,
478 .shift = 1,
479 .table = prediv_table16,
480};
481
482static struct of_device_id clkgena_prediv_of_match[] = {
483 { .compatible = "st,clkgena-prediv-c65", .data = &prediv_c65_data },
484 { .compatible = "st,clkgena-prediv-c32", .data = &prediv_c32_data },
485 {}
486};
487
488void __init st_of_clkgena_prediv_setup(struct device_node *np)
489{
490 const struct of_device_id *match;
491 void __iomem *reg;
492 const char *parent_name, *clk_name;
493 struct clk *clk;
494 struct clkgena_prediv_data *data;
495
496 match = of_match_node(clkgena_prediv_of_match, np);
497 if (!match) {
498 pr_err("%s: No matching data\n", __func__);
499 return;
500 }
501
502 data = (struct clkgena_prediv_data *)match->data;
503
504 reg = clkgen_get_register_base(np);
505 if (!reg)
506 return;
507
508 parent_name = of_clk_get_parent_name(np, 0);
509 if (!parent_name)
510 return;
511
512 if (of_property_read_string_index(np, "clock-output-names",
513 0, &clk_name))
514 return;
515
516 clk = clk_register_divider_table(NULL, clk_name, parent_name, 0,
517 reg + data->offset, data->shift, 1,
518 0, data->table, NULL);
519 if (IS_ERR(clk))
520 return;
521
522 of_clk_add_provider(np, of_clk_src_simple_get, clk);
523 pr_debug("%s: parent %s rate %u\n",
524 __clk_get_name(clk),
525 __clk_get_name(clk_get_parent(clk)),
526 (unsigned int)clk_get_rate(clk));
527
528 return;
529}
530CLK_OF_DECLARE(clkgenaprediv, "st,clkgena-prediv", st_of_clkgena_prediv_setup);
531
532struct clkgen_mux_data {
533 u32 offset;
534 u8 shift;
535 u8 width;
536 spinlock_t *lock;
537 unsigned long clk_flags;
538 u8 mux_flags;
539};
540
541static struct clkgen_mux_data clkgen_mux_c_vcc_hd_416 = {
542 .offset = 0,
543 .shift = 0,
544 .width = 1,
545};
546
547static struct clkgen_mux_data clkgen_mux_f_vcc_fvdp_416 = {
548 .offset = 0,
549 .shift = 0,
550 .width = 1,
551};
552
553static struct clkgen_mux_data clkgen_mux_f_vcc_hva_416 = {
554 .offset = 0,
555 .shift = 0,
556 .width = 1,
557};
558
559static struct clkgen_mux_data clkgen_mux_f_vcc_hd_416 = {
560 .offset = 0,
561 .shift = 16,
562 .width = 1,
563 .lock = &clkgenf_lock,
564};
565
566static struct clkgen_mux_data clkgen_mux_c_vcc_sd_416 = {
567 .offset = 0,
568 .shift = 17,
569 .width = 1,
570 .lock = &clkgenf_lock,
571};
572
573static struct clkgen_mux_data stih415_a9_mux_data = {
574 .offset = 0,
575 .shift = 1,
576 .width = 2,
577};
578static struct clkgen_mux_data stih416_a9_mux_data = {
579 .offset = 0,
580 .shift = 0,
581 .width = 2,
582};
583
584static struct of_device_id mux_of_match[] = {
585 {
586 .compatible = "st,stih416-clkgenc-vcc-hd",
587 .data = &clkgen_mux_c_vcc_hd_416,
588 },
589 {
590 .compatible = "st,stih416-clkgenf-vcc-fvdp",
591 .data = &clkgen_mux_f_vcc_fvdp_416,
592 },
593 {
594 .compatible = "st,stih416-clkgenf-vcc-hva",
595 .data = &clkgen_mux_f_vcc_hva_416,
596 },
597 {
598 .compatible = "st,stih416-clkgenf-vcc-hd",
599 .data = &clkgen_mux_f_vcc_hd_416,
600 },
601 {
602 .compatible = "st,stih416-clkgenf-vcc-sd",
603 .data = &clkgen_mux_c_vcc_sd_416,
604 },
605 {
606 .compatible = "st,stih415-clkgen-a9-mux",
607 .data = &stih415_a9_mux_data,
608 },
609 {
610 .compatible = "st,stih416-clkgen-a9-mux",
611 .data = &stih416_a9_mux_data,
612 },
613 {}
614};
615
616void __init st_of_clkgen_mux_setup(struct device_node *np)
617{
618 const struct of_device_id *match;
619 struct clk *clk;
620 void __iomem *reg;
621 const char **parents;
622 int num_parents;
623 struct clkgen_mux_data *data;
624
625 match = of_match_node(mux_of_match, np);
626 if (!match) {
627 pr_err("%s: No matching data\n", __func__);
628 return;
629 }
630
631 data = (struct clkgen_mux_data *)match->data;
632
633 reg = of_iomap(np, 0);
634 if (!reg) {
635 pr_err("%s: Failed to get base address\n", __func__);
636 return;
637 }
638
639 parents = clkgen_mux_get_parents(np, &num_parents);
640 if (IS_ERR(parents)) {
641 pr_err("%s: Failed to get parents (%ld)\n",
642 __func__, PTR_ERR(parents));
643 return;
644 }
645
646 clk = clk_register_mux(NULL, np->name, parents, num_parents,
647 data->clk_flags | CLK_SET_RATE_PARENT,
648 reg + data->offset,
649 data->shift, data->width, data->mux_flags,
650 data->lock);
651 if (IS_ERR(clk))
652 goto err;
653
654 pr_debug("%s: parent %s rate %u\n",
655 __clk_get_name(clk),
656 __clk_get_name(clk_get_parent(clk)),
657 (unsigned int)clk_get_rate(clk));
658
659 of_clk_add_provider(np, of_clk_src_simple_get, clk);
660
661err:
662 kfree(parents);
663
664 return;
665}
666CLK_OF_DECLARE(clkgen_mux, "st,clkgen-mux", st_of_clkgen_mux_setup);
667
668#define VCC_MAX_CHANNELS 16
669
670#define VCC_GATE_OFFSET 0x0
671#define VCC_MUX_OFFSET 0x4
672#define VCC_DIV_OFFSET 0x8
673
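/*
 * Per-channel layout implied by the registration loop below (derived from
 * the shifts and widths it programs, not from a datasheet): channel i uses
 * gate bit i at VCC_GATE_OFFSET, a 2-bit mux field at bits 2i..2i+1 of
 * VCC_MUX_OFFSET, and a 2-bit power-of-two divider at bits 2i..2i+1 of
 * VCC_DIV_OFFSET. E.g. channel 5 -> gate bit 5, mux/div field at bits 10..11.
 */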
674struct clkgen_vcc_data {
675 spinlock_t *lock;
676 unsigned long clk_flags;
677};
678
679static struct clkgen_vcc_data st_clkgenc_vcc_416 = {
680 .clk_flags = CLK_SET_RATE_PARENT,
681};
682
683static struct clkgen_vcc_data st_clkgenf_vcc_416 = {
684 .lock = &clkgenf_lock,
685};
686
687static struct of_device_id vcc_of_match[] = {
688 { .compatible = "st,stih416-clkgenc", .data = &st_clkgenc_vcc_416 },
689 { .compatible = "st,stih416-clkgenf", .data = &st_clkgenf_vcc_416 },
690 {}
691};
692
693void __init st_of_clkgen_vcc_setup(struct device_node *np)
694{
695 const struct of_device_id *match;
696 void __iomem *reg;
697 const char **parents;
698 int num_parents, i;
699 struct clk_onecell_data *clk_data;
700 struct clkgen_vcc_data *data;
701
702 match = of_match_node(vcc_of_match, np);
703 if (WARN_ON(!match))
704 return;
705 data = (struct clkgen_vcc_data *)match->data;
706
707 reg = of_iomap(np, 0);
708 if (!reg)
709 return;
710
711 parents = clkgen_mux_get_parents(np, &num_parents);
712 if (IS_ERR(parents))
713 return;
714
715 clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
716 if (!clk_data)
717 goto err;
718
719 clk_data->clk_num = VCC_MAX_CHANNELS;
720 clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *),
721 GFP_KERNEL);
722
723 if (!clk_data->clks)
724 goto err;
725
726 for (i = 0; i < clk_data->clk_num; i++) {
727 struct clk *clk;
728 const char *clk_name;
729 struct clk_gate *gate;
730 struct clk_divider *div;
731 struct clk_mux *mux;
732
733 if (of_property_read_string_index(np, "clock-output-names",
734 i, &clk_name))
735 break;
736
737 /*
738 * If we read an empty clock name then the output is unused
739 */
740 if (*clk_name == '\0')
741 continue;
742
743 gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
744 if (!gate)
745 break;
746
747 div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL);
748 if (!div) {
749 kfree(gate);
750 break;
751 }
752
753 mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
754 if (!mux) {
755 kfree(gate);
756 kfree(div);
757 break;
758 }
759
760 gate->reg = reg + VCC_GATE_OFFSET;
761 gate->bit_idx = i;
762 gate->flags = CLK_GATE_SET_TO_DISABLE;
763 gate->lock = data->lock;
764
765 div->reg = reg + VCC_DIV_OFFSET;
766 div->shift = 2 * i;
767 div->width = 2;
768 div->flags = CLK_DIVIDER_POWER_OF_TWO;
769
770 mux->reg = reg + VCC_MUX_OFFSET;
771 mux->shift = 2 * i;
772 mux->mask = 0x3;
773
774 clk = clk_register_composite(NULL, clk_name, parents,
775 num_parents,
776 &mux->hw, &clk_mux_ops,
777 &div->hw, &clk_divider_ops,
778 &gate->hw, &clk_gate_ops,
779 data->clk_flags);
780 if (IS_ERR(clk)) {
781 kfree(gate);
782 kfree(div);
783 kfree(mux);
784 goto err;
785 }
786
787 pr_debug("%s: parent %s rate %u\n",
788 __clk_get_name(clk),
789 __clk_get_name(clk_get_parent(clk)),
790 (unsigned int)clk_get_rate(clk));
791
792 clk_data->clks[i] = clk;
793 }
794
795 kfree(parents);
796
797 of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
798 return;
799
800err:
801 for (i = 0; i < clk_data->clk_num; i++) {
802 struct clk_composite *composite;
803
804 if (!clk_data->clks[i])
805 continue;
806
807 composite = container_of(__clk_get_hw(clk_data->clks[i]),
808 struct clk_composite, hw);
809 kfree(container_of(composite->gate_hw, struct clk_gate, hw));
810 kfree(container_of(composite->rate_hw, struct clk_divider, hw));
811 kfree(container_of(composite->mux_hw, struct clk_mux, hw));
812 }
813
814 if (clk_data)
815 kfree(clk_data->clks);
816
817 kfree(clk_data);
818 kfree(parents);
819}
820CLK_OF_DECLARE(clkgen_vcc, "st,clkgen-vcc", st_of_clkgen_vcc_setup);
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
new file mode 100644
index 000000000000..bca0a0badbfa
--- /dev/null
+++ b/drivers/clk/st/clkgen-pll.c
@@ -0,0 +1,698 @@
1/*
2 * Copyright (C) 2014 STMicroelectronics (R&D) Limited
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 */
10
11/*
12 * Authors:
13 * Stephen Gallimore <stephen.gallimore@st.com>,
14 * Pankaj Dev <pankaj.dev@st.com>.
15 */
16
17#include <linux/slab.h>
18#include <linux/of_address.h>
19#include <linux/clk-provider.h>
20
21#include "clkgen.h"
22
23static DEFINE_SPINLOCK(clkgena_c32_odf_lock);
24
25/*
26 * Common PLL configuration register bits for PLL800 and PLL1600 C65
27 */
28#define C65_MDIV_PLL800_MASK (0xff)
29#define C65_MDIV_PLL1600_MASK (0x7)
30#define C65_NDIV_MASK (0xff)
31#define C65_PDIV_MASK (0x7)
32
33/*
34 * PLL configuration register bits for PLL3200 C32
35 */
36#define C32_NDIV_MASK (0xff)
37#define C32_IDF_MASK (0x7)
38#define C32_ODF_MASK (0x3f)
39#define C32_LDF_MASK (0x7f)
40
41#define C32_MAX_ODFS (4)
42
43struct clkgen_pll_data {
44 struct clkgen_field pdn_status;
45 struct clkgen_field locked_status;
46 struct clkgen_field mdiv;
47 struct clkgen_field ndiv;
48 struct clkgen_field pdiv;
49 struct clkgen_field idf;
50 struct clkgen_field ldf;
51 unsigned int num_odfs;
52 struct clkgen_field odf[C32_MAX_ODFS];
53 struct clkgen_field odf_gate[C32_MAX_ODFS];
54 const struct clk_ops *ops;
55};
56
57static const struct clk_ops st_pll1600c65_ops;
58static const struct clk_ops st_pll800c65_ops;
59static const struct clk_ops stm_pll3200c32_ops;
60static const struct clk_ops st_pll1200c32_ops;
61
62static struct clkgen_pll_data st_pll1600c65_ax = {
63 .pdn_status = CLKGEN_FIELD(0x0, 0x1, 19),
64 .locked_status = CLKGEN_FIELD(0x0, 0x1, 31),
65 .mdiv = CLKGEN_FIELD(0x0, C65_MDIV_PLL1600_MASK, 0),
66 .ndiv = CLKGEN_FIELD(0x0, C65_NDIV_MASK, 8),
67 .ops = &st_pll1600c65_ops
68};
69
70static struct clkgen_pll_data st_pll800c65_ax = {
71 .pdn_status = CLKGEN_FIELD(0x0, 0x1, 19),
72 .locked_status = CLKGEN_FIELD(0x0, 0x1, 31),
73 .mdiv = CLKGEN_FIELD(0x0, C65_MDIV_PLL800_MASK, 0),
74 .ndiv = CLKGEN_FIELD(0x0, C65_NDIV_MASK, 8),
75 .pdiv = CLKGEN_FIELD(0x0, C65_PDIV_MASK, 16),
76 .ops = &st_pll800c65_ops
77};
78
79static struct clkgen_pll_data st_pll3200c32_a1x_0 = {
80 .pdn_status = CLKGEN_FIELD(0x0, 0x1, 31),
81 .locked_status = CLKGEN_FIELD(0x4, 0x1, 31),
82 .ndiv = CLKGEN_FIELD(0x0, C32_NDIV_MASK, 0x0),
83 .idf = CLKGEN_FIELD(0x4, C32_IDF_MASK, 0x0),
84 .num_odfs = 4,
85 .odf = { CLKGEN_FIELD(0x54, C32_ODF_MASK, 4),
86 CLKGEN_FIELD(0x54, C32_ODF_MASK, 10),
87 CLKGEN_FIELD(0x54, C32_ODF_MASK, 16),
88 CLKGEN_FIELD(0x54, C32_ODF_MASK, 22) },
89 .odf_gate = { CLKGEN_FIELD(0x54, 0x1, 0),
90 CLKGEN_FIELD(0x54, 0x1, 1),
91 CLKGEN_FIELD(0x54, 0x1, 2),
92 CLKGEN_FIELD(0x54, 0x1, 3) },
93 .ops = &stm_pll3200c32_ops,
94};
95
96static struct clkgen_pll_data st_pll3200c32_a1x_1 = {
97 .pdn_status = CLKGEN_FIELD(0xC, 0x1, 31),
98 .locked_status = CLKGEN_FIELD(0x10, 0x1, 31),
99 .ndiv = CLKGEN_FIELD(0xC, C32_NDIV_MASK, 0x0),
100 .idf = CLKGEN_FIELD(0x10, C32_IDF_MASK, 0x0),
101 .num_odfs = 4,
102 .odf = { CLKGEN_FIELD(0x58, C32_ODF_MASK, 4),
103 CLKGEN_FIELD(0x58, C32_ODF_MASK, 10),
104 CLKGEN_FIELD(0x58, C32_ODF_MASK, 16),
105 CLKGEN_FIELD(0x58, C32_ODF_MASK, 22) },
106 .odf_gate = { CLKGEN_FIELD(0x58, 0x1, 0),
107 CLKGEN_FIELD(0x58, 0x1, 1),
108 CLKGEN_FIELD(0x58, 0x1, 2),
109 CLKGEN_FIELD(0x58, 0x1, 3) },
110 .ops = &stm_pll3200c32_ops,
111};
112
113/* 415 specific */
114static struct clkgen_pll_data st_pll3200c32_a9_415 = {
115 .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0),
116 .locked_status = CLKGEN_FIELD(0x6C, 0x1, 0),
117 .ndiv = CLKGEN_FIELD(0x0, C32_NDIV_MASK, 9),
118 .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 22),
119 .num_odfs = 1,
120 .odf = { CLKGEN_FIELD(0x0, C32_ODF_MASK, 3) },
121 .odf_gate = { CLKGEN_FIELD(0x0, 0x1, 28) },
122 .ops = &stm_pll3200c32_ops,
123};
124
125static struct clkgen_pll_data st_pll3200c32_ddr_415 = {
126 .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0),
127 .locked_status = CLKGEN_FIELD(0x100, 0x1, 0),
128 .ndiv = CLKGEN_FIELD(0x8, C32_NDIV_MASK, 0),
129 .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 25),
130 .num_odfs = 2,
131 .odf = { CLKGEN_FIELD(0x8, C32_ODF_MASK, 8),
132 CLKGEN_FIELD(0x8, C32_ODF_MASK, 14) },
133 .odf_gate = { CLKGEN_FIELD(0x4, 0x1, 28),
134 CLKGEN_FIELD(0x4, 0x1, 29) },
135 .ops = &stm_pll3200c32_ops,
136};
137
138static struct clkgen_pll_data st_pll1200c32_gpu_415 = {
139 .pdn_status = CLKGEN_FIELD(0x144, 0x1, 3),
140 .locked_status = CLKGEN_FIELD(0x168, 0x1, 0),
141 .ldf = CLKGEN_FIELD(0x0, C32_LDF_MASK, 3),
142 .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 0),
143 .num_odfs = 0,
144 .odf = { CLKGEN_FIELD(0x0, C32_ODF_MASK, 10) },
145 .ops = &st_pll1200c32_ops,
146};
147
148/* 416 specific */
149static struct clkgen_pll_data st_pll3200c32_a9_416 = {
150 .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0),
151 .locked_status = CLKGEN_FIELD(0x6C, 0x1, 0),
152 .ndiv = CLKGEN_FIELD(0x8, C32_NDIV_MASK, 0),
153 .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 25),
154 .num_odfs = 1,
155 .odf = { CLKGEN_FIELD(0x8, C32_ODF_MASK, 8) },
156 .odf_gate = { CLKGEN_FIELD(0x4, 0x1, 28) },
157 .ops = &stm_pll3200c32_ops,
158};
159
160static struct clkgen_pll_data st_pll3200c32_ddr_416 = {
161 .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0),
162 .locked_status = CLKGEN_FIELD(0x10C, 0x1, 0),
163 .ndiv = CLKGEN_FIELD(0x8, C32_NDIV_MASK, 0),
164 .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 25),
165 .num_odfs = 2,
166 .odf = { CLKGEN_FIELD(0x8, C32_ODF_MASK, 8),
167 CLKGEN_FIELD(0x8, C32_ODF_MASK, 14) },
168 .odf_gate = { CLKGEN_FIELD(0x4, 0x1, 28),
169 CLKGEN_FIELD(0x4, 0x1, 29) },
170 .ops = &stm_pll3200c32_ops,
171};
172
173static struct clkgen_pll_data st_pll1200c32_gpu_416 = {
174 .pdn_status = CLKGEN_FIELD(0x8E4, 0x1, 3),
175 .locked_status = CLKGEN_FIELD(0x90C, 0x1, 0),
176 .ldf = CLKGEN_FIELD(0x0, C32_LDF_MASK, 3),
177 .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 0),
178 .num_odfs = 0,
179 .odf = { CLKGEN_FIELD(0x0, C32_ODF_MASK, 10) },
180 .ops = &st_pll1200c32_ops,
181};
182
183/**
184 * DOC: Clock Generated by PLL, rate set and enabled by bootloader
185 *
186 * Traits of this clock:
187 * prepare - clk_(un)prepare only ensures parent is (un)prepared
188 * enable - clk_enable/disable only ensures parent is enabled
189 * rate - rate is fixed. No clk_set_rate support
190 * parent - fixed parent. No clk_set_parent support
191 */
192
193/**
194 * PLL clock that is integrated in the ClockGenA instances on the STiH415
195 * and STiH416.
196 *
197 * @hw: handle between common and hardware-specific interfaces.
 198 * @data: PLL configuration data (register fields and ops) for this instance.
199 * @regs_base: base of the PLL configuration register(s).
200 *
201 */
202struct clkgen_pll {
203 struct clk_hw hw;
204 struct clkgen_pll_data *data;
205 void __iomem *regs_base;
206};
207
208#define to_clkgen_pll(_hw) container_of(_hw, struct clkgen_pll, hw)
209
210static int clkgen_pll_is_locked(struct clk_hw *hw)
211{
212 struct clkgen_pll *pll = to_clkgen_pll(hw);
213 u32 locked = CLKGEN_READ(pll, locked_status);
214
215 return !!locked;
216}
217
218static int clkgen_pll_is_enabled(struct clk_hw *hw)
219{
220 struct clkgen_pll *pll = to_clkgen_pll(hw);
221 u32 poweroff = CLKGEN_READ(pll, pdn_status);
222 return !poweroff;
223}
224
225unsigned long recalc_stm_pll800c65(struct clk_hw *hw,
226 unsigned long parent_rate)
227{
228 struct clkgen_pll *pll = to_clkgen_pll(hw);
229 unsigned long mdiv, ndiv, pdiv;
230 unsigned long rate;
231 uint64_t res;
232
233 if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw))
234 return 0;
235
236 pdiv = CLKGEN_READ(pll, pdiv);
237 mdiv = CLKGEN_READ(pll, mdiv);
238 ndiv = CLKGEN_READ(pll, ndiv);
239
240 if (!mdiv)
241 mdiv++; /* mdiv=0 or 1 => MDIV=1 */
242
243 res = (uint64_t)2 * (uint64_t)parent_rate * (uint64_t)ndiv;
244 rate = (unsigned long)div64_u64(res, mdiv * (1 << pdiv));
245
246 pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
247
248 return rate;
249
250}
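/*
 * Worked example with illustrative values (not taken from a datasheet):
 * parent_rate = 30 MHz, ndiv = 200, mdiv = 25, pdiv = 0 gives
 * (2 * 30 MHz * 200) / (25 * 1) = 480 MHz.
 */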
251
252unsigned long recalc_stm_pll1600c65(struct clk_hw *hw,
253 unsigned long parent_rate)
254{
255 struct clkgen_pll *pll = to_clkgen_pll(hw);
256 unsigned long mdiv, ndiv;
257 unsigned long rate;
258
259 if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw))
260 return 0;
261
262 mdiv = CLKGEN_READ(pll, mdiv);
263 ndiv = CLKGEN_READ(pll, ndiv);
264
265 if (!mdiv)
266 mdiv = 1;
267
268 /* Note: input is divided by 1000 to avoid overflow */
269 rate = ((2 * (parent_rate / 1000) * ndiv) / mdiv) * 1000;
270
271 pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
272
273 return rate;
274}
275
276unsigned long recalc_stm_pll3200c32(struct clk_hw *hw,
277 unsigned long parent_rate)
278{
279 struct clkgen_pll *pll = to_clkgen_pll(hw);
280 unsigned long ndiv, idf;
281 unsigned long rate = 0;
282
283 if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw))
284 return 0;
285
286 ndiv = CLKGEN_READ(pll, ndiv);
287 idf = CLKGEN_READ(pll, idf);
288
289 if (idf)
290 /* Note: input is divided to avoid overflow */
291 rate = ((2 * (parent_rate/1000) * ndiv) / idf) * 1000;
292
293 pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
294
295 return rate;
296}
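/*
 * Worked example with illustrative values: parent_rate = 30 MHz, ndiv = 100,
 * idf = 3 gives ((2 * 30000 * 100) / 3) * 1000 = 2 GHz for the VCO.
 */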
297
298unsigned long recalc_stm_pll1200c32(struct clk_hw *hw,
299 unsigned long parent_rate)
300{
301 struct clkgen_pll *pll = to_clkgen_pll(hw);
302 unsigned long odf, ldf, idf;
303 unsigned long rate;
304
305 if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw))
306 return 0;
307
308 odf = CLKGEN_READ(pll, odf[0]);
309 ldf = CLKGEN_READ(pll, ldf);
310 idf = CLKGEN_READ(pll, idf);
311
312 if (!idf) /* idf==0 means 1 */
313 idf = 1;
314 if (!odf) /* odf==0 means 1 */
315 odf = 1;
316
317 /* Note: input is divided by 1000 to avoid overflow */
318 rate = (((parent_rate / 1000) * ldf) / (odf * idf)) * 1000;
319
320 pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
321
322 return rate;
323}
324
325static const struct clk_ops st_pll1600c65_ops = {
326 .is_enabled = clkgen_pll_is_enabled,
327 .recalc_rate = recalc_stm_pll1600c65,
328};
329
330static const struct clk_ops st_pll800c65_ops = {
331 .is_enabled = clkgen_pll_is_enabled,
332 .recalc_rate = recalc_stm_pll800c65,
333};
334
335static const struct clk_ops stm_pll3200c32_ops = {
336 .is_enabled = clkgen_pll_is_enabled,
337 .recalc_rate = recalc_stm_pll3200c32,
338};
339
340static const struct clk_ops st_pll1200c32_ops = {
341 .is_enabled = clkgen_pll_is_enabled,
342 .recalc_rate = recalc_stm_pll1200c32,
343};
344
345static struct clk * __init clkgen_pll_register(const char *parent_name,
346 struct clkgen_pll_data *pll_data,
347 void __iomem *reg,
348 const char *clk_name)
349{
350 struct clkgen_pll *pll;
351 struct clk *clk;
352 struct clk_init_data init;
353
354 pll = kzalloc(sizeof(*pll), GFP_KERNEL);
355 if (!pll)
356 return ERR_PTR(-ENOMEM);
357
358 init.name = clk_name;
359 init.ops = pll_data->ops;
360
361 init.flags = CLK_IS_BASIC;
362 init.parent_names = &parent_name;
363 init.num_parents = 1;
364
365 pll->data = pll_data;
366 pll->regs_base = reg;
367 pll->hw.init = &init;
368
369 clk = clk_register(NULL, &pll->hw);
370 if (IS_ERR(clk)) {
371 kfree(pll);
372 return clk;
373 }
374
375 pr_debug("%s: parent %s rate %lu\n",
376 __clk_get_name(clk),
377 __clk_get_name(clk_get_parent(clk)),
378 clk_get_rate(clk));
379
380 return clk;
381}
382
383static struct clk * __init clkgen_c65_lsdiv_register(const char *parent_name,
384 const char *clk_name)
385{
386 struct clk *clk;
387
388 clk = clk_register_fixed_factor(NULL, clk_name, parent_name, 0, 1, 2);
389 if (IS_ERR(clk))
390 return clk;
391
392 pr_debug("%s: parent %s rate %lu\n",
393 __clk_get_name(clk),
394 __clk_get_name(clk_get_parent(clk)),
395 clk_get_rate(clk));
396 return clk;
397}
398
399static void __iomem * __init clkgen_get_register_base(
400 struct device_node *np)
401{
402 struct device_node *pnode;
403 void __iomem *reg = NULL;
404
405 pnode = of_get_parent(np);
406 if (!pnode)
407 return NULL;
408
409 reg = of_iomap(pnode, 0);
410
411 of_node_put(pnode);
412 return reg;
413}
414
415#define CLKGENAx_PLL0_OFFSET 0x0
416#define CLKGENAx_PLL1_OFFSET 0x4
417
418static void __init clkgena_c65_pll_setup(struct device_node *np)
419{
420 const int num_pll_outputs = 3;
421 struct clk_onecell_data *clk_data;
422 const char *parent_name;
423 void __iomem *reg;
424 const char *clk_name;
425
426 parent_name = of_clk_get_parent_name(np, 0);
427 if (!parent_name)
428 return;
429
430 reg = clkgen_get_register_base(np);
431 if (!reg)
432 return;
433
434 clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
435 if (!clk_data)
436 return;
437
438 clk_data->clk_num = num_pll_outputs;
439 clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *),
440 GFP_KERNEL);
441
442 if (!clk_data->clks)
443 goto err;
444
445 if (of_property_read_string_index(np, "clock-output-names",
446 0, &clk_name))
447 goto err;
448
449 /*
450 * PLL0 HS (high speed) output
451 */
452 clk_data->clks[0] = clkgen_pll_register(parent_name,
453 &st_pll1600c65_ax,
454 reg + CLKGENAx_PLL0_OFFSET,
455 clk_name);
456
457 if (IS_ERR(clk_data->clks[0]))
458 goto err;
459
460 if (of_property_read_string_index(np, "clock-output-names",
461 1, &clk_name))
462 goto err;
463
464 /*
465 * PLL0 LS (low speed) output, which is a fixed divide by 2 of the
466 * high speed output.
467 */
468 clk_data->clks[1] = clkgen_c65_lsdiv_register(__clk_get_name
469 (clk_data->clks[0]),
470 clk_name);
471
472 if (IS_ERR(clk_data->clks[1]))
473 goto err;
474
475 if (of_property_read_string_index(np, "clock-output-names",
476 2, &clk_name))
477 goto err;
478
479 /*
480 * PLL1 output
481 */
482 clk_data->clks[2] = clkgen_pll_register(parent_name,
483 &st_pll800c65_ax,
484 reg + CLKGENAx_PLL1_OFFSET,
485 clk_name);
486
487 if (IS_ERR(clk_data->clks[2]))
488 goto err;
489
490 of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
491 return;
492
493err:
494 kfree(clk_data->clks);
495 kfree(clk_data);
496}
497CLK_OF_DECLARE(clkgena_c65_plls,
498 "st,clkgena-plls-c65", clkgena_c65_pll_setup);
499
500static struct clk * __init clkgen_odf_register(const char *parent_name,
 501 void __iomem *reg,
502 struct clkgen_pll_data *pll_data,
503 int odf,
504 spinlock_t *odf_lock,
505 const char *odf_name)
506{
507 struct clk *clk;
508 unsigned long flags;
509 struct clk_gate *gate;
510 struct clk_divider *div;
511
512 flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_GATE;
513
514 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
515 if (!gate)
516 return ERR_PTR(-ENOMEM);
517
518 gate->flags = CLK_GATE_SET_TO_DISABLE;
519 gate->reg = reg + pll_data->odf_gate[odf].offset;
520 gate->bit_idx = pll_data->odf_gate[odf].shift;
521 gate->lock = odf_lock;
522
523 div = kzalloc(sizeof(*div), GFP_KERNEL);
524 if (!div)
525 return ERR_PTR(-ENOMEM);
526
527 div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
528 div->reg = reg + pll_data->odf[odf].offset;
529 div->shift = pll_data->odf[odf].shift;
530 div->width = fls(pll_data->odf[odf].mask);
531 div->lock = odf_lock;
532
533 clk = clk_register_composite(NULL, odf_name, &parent_name, 1,
534 NULL, NULL,
535 &div->hw, &clk_divider_ops,
536 &gate->hw, &clk_gate_ops,
537 flags);
538 if (IS_ERR(clk))
539 return clk;
540
541 pr_debug("%s: parent %s rate %lu\n",
542 __clk_get_name(clk),
543 __clk_get_name(clk_get_parent(clk)),
544 clk_get_rate(clk));
545 return clk;
546}
547
548static struct of_device_id c32_pll_of_match[] = {
549 {
550 .compatible = "st,plls-c32-a1x-0",
551 .data = &st_pll3200c32_a1x_0,
552 },
553 {
554 .compatible = "st,plls-c32-a1x-1",
555 .data = &st_pll3200c32_a1x_1,
556 },
557 {
558 .compatible = "st,stih415-plls-c32-a9",
559 .data = &st_pll3200c32_a9_415,
560 },
561 {
562 .compatible = "st,stih415-plls-c32-ddr",
563 .data = &st_pll3200c32_ddr_415,
564 },
565 {
566 .compatible = "st,stih416-plls-c32-a9",
567 .data = &st_pll3200c32_a9_416,
568 },
569 {
570 .compatible = "st,stih416-plls-c32-ddr",
571 .data = &st_pll3200c32_ddr_416,
572 },
573 {}
574};
575
576static void __init clkgen_c32_pll_setup(struct device_node *np)
577{
578 const struct of_device_id *match;
579 struct clk *clk;
580 const char *parent_name, *pll_name;
581 void __iomem *pll_base;
582 int num_odfs, odf;
583 struct clk_onecell_data *clk_data;
584 struct clkgen_pll_data *data;
585
586 match = of_match_node(c32_pll_of_match, np);
587 if (!match) {
588 pr_err("%s: No matching data\n", __func__);
589 return;
590 }
591
592 data = (struct clkgen_pll_data *) match->data;
593
594 parent_name = of_clk_get_parent_name(np, 0);
595 if (!parent_name)
596 return;
597
598 pll_base = clkgen_get_register_base(np);
599 if (!pll_base)
600 return;
601
602 clk = clkgen_pll_register(parent_name, data, pll_base, np->name);
603 if (IS_ERR(clk))
604 return;
605
606 pll_name = __clk_get_name(clk);
607
608 num_odfs = data->num_odfs;
609
610 clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
611 if (!clk_data)
612 return;
613
614 clk_data->clk_num = num_odfs;
615 clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *),
616 GFP_KERNEL);
617
618 if (!clk_data->clks)
619 goto err;
620
621 for (odf = 0; odf < num_odfs; odf++) {
622 struct clk *clk;
623 const char *clk_name;
624
625 if (of_property_read_string_index(np, "clock-output-names",
626 odf, &clk_name))
627 return;
628
629 clk = clkgen_odf_register(pll_name, pll_base, data,
630 odf, &clkgena_c32_odf_lock, clk_name);
631 if (IS_ERR(clk))
632 goto err;
633
634 clk_data->clks[odf] = clk;
635 }
636
637 of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
638 return;
639
640err:
641 kfree(pll_name);
642 kfree(clk_data->clks);
643 kfree(clk_data);
644}
645CLK_OF_DECLARE(clkgen_c32_pll, "st,clkgen-plls-c32", clkgen_c32_pll_setup);
646
647static struct of_device_id c32_gpu_pll_of_match[] = {
648 {
649 .compatible = "st,stih415-gpu-pll-c32",
650 .data = &st_pll1200c32_gpu_415,
651 },
652 {
653 .compatible = "st,stih416-gpu-pll-c32",
654 .data = &st_pll1200c32_gpu_416,
655 },
656};
657
658static void __init clkgengpu_c32_pll_setup(struct device_node *np)
659{
660 const struct of_device_id *match;
661 struct clk *clk;
662 const char *parent_name;
663 void __iomem *reg;
664 const char *clk_name;
665 struct clkgen_pll_data *data;
666
667 match = of_match_node(c32_gpu_pll_of_match, np);
668 if (!match) {
669 pr_err("%s: No matching data\n", __func__);
670 return;
671 }
672
673 data = (struct clkgen_pll_data *)match->data;
674
675 parent_name = of_clk_get_parent_name(np, 0);
676 if (!parent_name)
677 return;
678
679 reg = clkgen_get_register_base(np);
680 if (!reg)
681 return;
682
683 if (of_property_read_string_index(np, "clock-output-names",
684 0, &clk_name))
685 return;
686
687 /*
688 * PLL 1200MHz output
689 */
690 clk = clkgen_pll_register(parent_name, data, reg, clk_name);
691
692 if (!IS_ERR(clk))
693 of_clk_add_provider(np, of_clk_src_simple_get, clk);
694
695 return;
696}
697CLK_OF_DECLARE(clkgengpu_c32_pll,
698 "st,clkgengpu-pll-c32", clkgengpu_c32_pll_setup);
diff --git a/drivers/clk/st/clkgen.h b/drivers/clk/st/clkgen.h
new file mode 100644
index 000000000000..35c863295268
--- /dev/null
+++ b/drivers/clk/st/clkgen.h
@@ -0,0 +1,48 @@
1/************************************************************************
2File : Clock H/w specific Information
3
4Author: Pankaj Dev <pankaj.dev@st.com>
5
6Copyright (C) 2014 STMicroelectronics
7************************************************************************/
8
9#ifndef __CLKGEN_INFO_H
10#define __CLKGEN_INFO_H
11
12struct clkgen_field {
13 unsigned int offset;
14 unsigned int mask;
15 unsigned int shift;
16};
17
18static inline unsigned long clkgen_read(void __iomem *base,
19 struct clkgen_field *field)
20{
21 return (readl(base + field->offset) >> field->shift) & field->mask;
22}
23
24
25static inline void clkgen_write(void __iomem *base, struct clkgen_field *field,
26 unsigned long val)
27{
28 writel((readl(base + field->offset) &
29 ~(field->mask << field->shift)) | (val << field->shift),
30 base + field->offset);
31
32 return;
33}
34
35#define CLKGEN_FIELD(_offset, _mask, _shift) { \
36 .offset = _offset, \
37 .mask = _mask, \
38 .shift = _shift, \
39 }
40
41#define CLKGEN_READ(pll, field) clkgen_read(pll->regs_base, \
42 &pll->data->field)
43
44#define CLKGEN_WRITE(pll, field, val) clkgen_write(pll->regs_base, \
45 &pll->data->field, val)
46
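/*
 * Usage sketch for the helpers above (offset/mask/shift values are
 * illustrative only, not taken from any real clockgen):
 *
 *	static struct clkgen_field example_ndiv = CLKGEN_FIELD(0x8, 0xff, 0);
 *
 *	ndiv = clkgen_read(base, &example_ndiv);
 *	clkgen_write(base, &example_ndiv, ndiv + 1);
 */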
47#endif /*__CLKGEN_INFO_H*/
48
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index abb6c5ac8a10..bd7dc733c1ca 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -18,6 +18,7 @@
18#include <linux/clkdev.h> 18#include <linux/clkdev.h>
19#include <linux/of.h> 19#include <linux/of.h>
20#include <linux/of_address.h> 20#include <linux/of_address.h>
21#include <linux/reset-controller.h>
21 22
22#include "clk-factors.h" 23#include "clk-factors.h"
23 24
@@ -51,6 +52,8 @@ static void __init sun4i_osc_clk_setup(struct device_node *node)
51 if (!gate) 52 if (!gate)
52 goto err_free_fixed; 53 goto err_free_fixed;
53 54
55 of_property_read_string(node, "clock-output-names", &clk_name);
56
54 /* set up gate and fixed rate properties */ 57 /* set up gate and fixed rate properties */
55 gate->reg = of_iomap(node, 0); 58 gate->reg = of_iomap(node, 0);
56 gate->bit_idx = SUNXI_OSC24M_GATE; 59 gate->bit_idx = SUNXI_OSC24M_GATE;
@@ -77,7 +80,7 @@ err_free_gate:
77err_free_fixed: 80err_free_fixed:
78 kfree(fixed); 81 kfree(fixed);
79} 82}
80CLK_OF_DECLARE(sun4i_osc, "allwinner,sun4i-osc-clk", sun4i_osc_clk_setup); 83CLK_OF_DECLARE(sun4i_osc, "allwinner,sun4i-a10-osc-clk", sun4i_osc_clk_setup);
81 84
82 85
83 86
@@ -249,7 +252,38 @@ static void sun4i_get_pll5_factors(u32 *freq, u32 parent_rate,
249 *n = DIV_ROUND_UP(div, (*k+1)); 252 *n = DIV_ROUND_UP(div, (*k+1));
250} 253}
251 254
255/**
256 * sun6i_a31_get_pll6_factors() - calculates n, k factors for A31 PLL6
257 * PLL6 rate is calculated as follows
258 * rate = parent_rate * n * (k + 1) / 2
 259 * parent_rate is always 24 MHz
260 */
261
262static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
263 u8 *n, u8 *k, u8 *m, u8 *p)
264{
265 u8 div;
266
267 /*
268 * We always have 24MHz / 2, so we can just say that our
269 * parent clock is 12MHz.
270 */
271 parent_rate = parent_rate / 2;
252 272
273 /* Normalize value to a parent_rate multiple (24M / 2) */
274 div = *freq / parent_rate;
275 *freq = parent_rate * div;
276
277 /* we were called to round the frequency, we can now return */
278 if (n == NULL)
279 return;
280
281 *k = div / 32;
282 if (*k > 3)
283 *k = 3;
284
285 *n = DIV_ROUND_UP(div, (*k+1));
286}
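/*
 * Worked example (illustrative request): for *freq = 600 MHz and a 24 MHz
 * parent, the effective parent is 12 MHz, div = 50, so k = 1 and
 * n = DIV_ROUND_UP(50, 2) = 25, giving 24 MHz * 25 * (1 + 1) / 2 = 600 MHz.
 */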
253 287
254/** 288/**
255 * sun4i_get_apb1_factors() - calculates m, p factors for APB1 289 * sun4i_get_apb1_factors() - calculates m, p factors for APB1
@@ -265,7 +299,7 @@ static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate,
265 if (parent_rate < *freq) 299 if (parent_rate < *freq)
266 *freq = parent_rate; 300 *freq = parent_rate;
267 301
268 parent_rate = (parent_rate + (*freq - 1)) / *freq; 302 parent_rate = DIV_ROUND_UP(parent_rate, *freq);
269 303
270 /* Invalid rate! */ 304 /* Invalid rate! */
271 if (parent_rate > 32) 305 if (parent_rate > 32)
@@ -296,7 +330,7 @@ static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate,
296 330
297/** 331/**
298 * sun4i_get_mod0_factors() - calculates m, n factors for MOD0-style clocks 332 * sun4i_get_mod0_factors() - calculates m, n factors for MOD0-style clocks
299 * MMC rate is calculated as follows 333 * MOD0 rate is calculated as follows
300 * rate = (parent_rate >> p) / (m + 1); 334 * rate = (parent_rate >> p) / (m + 1);
301 */ 335 */
302 336
@@ -310,7 +344,7 @@ static void sun4i_get_mod0_factors(u32 *freq, u32 parent_rate,
310 if (*freq > parent_rate) 344 if (*freq > parent_rate)
311 *freq = parent_rate; 345 *freq = parent_rate;
312 346
313 div = parent_rate / *freq; 347 div = DIV_ROUND_UP(parent_rate, *freq);
314 348
315 if (div < 16) 349 if (div < 16)
316 calcp = 0; 350 calcp = 0;
@@ -351,7 +385,7 @@ static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
351 if (*freq > parent_rate) 385 if (*freq > parent_rate)
352 *freq = parent_rate; 386 *freq = parent_rate;
353 387
354 div = parent_rate / *freq; 388 div = DIV_ROUND_UP(parent_rate, *freq);
355 389
356 if (div < 32) 390 if (div < 32)
357 calcp = 0; 391 calcp = 0;
@@ -377,6 +411,102 @@ static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
377 411
378 412
379/** 413/**
414 * sun7i_a20_gmac_clk_setup - Setup function for A20/A31 GMAC clock module
415 *
416 * This clock looks something like this
417 * ________________________
418 * MII TX clock from PHY >-----|___________ _________|----> to GMAC core
419 * GMAC Int. RGMII TX clk >----|___________\__/__gate---|----> to PHY
420 * Ext. 125MHz RGMII TX clk >--|__divider__/ |
421 * |________________________|
422 *
423 * The external 125 MHz reference is optional, i.e. GMAC can use its
424 * internal TX clock just fine. The A31 GMAC clock module does not have
425 * the divider controls for the external reference.
426 *
 427 * To keep it simple, let the GMAC use the MII TX clock for MII mode, and
 428 * its internal TX clock for GMII and RGMII modes. The GMAC driver should
429 * select the appropriate source and gate/ungate the output to the PHY.
430 *
431 * Only the GMAC should use this clock. Altering the clock so that it doesn't
432 * match the GMAC's operation parameters will result in the GMAC not being
433 * able to send traffic out. The GMAC driver should set the clock rate and
434 * enable/disable this clock to configure the required state. The clock
435 * driver then responds by auto-reparenting the clock.
436 */
437
438#define SUN7I_A20_GMAC_GPIT 2
439#define SUN7I_A20_GMAC_MASK 0x3
440#define SUN7I_A20_GMAC_PARENTS 2
441
442static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
443{
444 struct clk *clk;
445 struct clk_mux *mux;
446 struct clk_gate *gate;
447 const char *clk_name = node->name;
448 const char *parents[SUN7I_A20_GMAC_PARENTS];
449 void *reg;
450
451 if (of_property_read_string(node, "clock-output-names", &clk_name))
452 return;
453
454 /* allocate mux and gate clock structs */
455 mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
456 if (!mux)
457 return;
458
459 gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
460 if (!gate)
461 goto free_mux;
462
463 /* gmac clock requires exactly 2 parents */
464 parents[0] = of_clk_get_parent_name(node, 0);
465 parents[1] = of_clk_get_parent_name(node, 1);
466 if (!parents[0] || !parents[1])
467 goto free_gate;
468
469 reg = of_iomap(node, 0);
470 if (!reg)
471 goto free_gate;
472
473 /* set up gate and fixed rate properties */
474 gate->reg = reg;
475 gate->bit_idx = SUN7I_A20_GMAC_GPIT;
476 gate->lock = &clk_lock;
477 mux->reg = reg;
478 mux->mask = SUN7I_A20_GMAC_MASK;
479 mux->flags = CLK_MUX_INDEX_BIT;
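	/*
	 * With CLK_MUX_INDEX_BIT the generic mux treats the field as one bit
	 * per input, so parent 0 is selected by writing 0x1 and parent 1 by
	 * writing 0x2 within the 0x3 mask above.
	 */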
480 mux->lock = &clk_lock;
481
482 clk = clk_register_composite(NULL, clk_name,
483 parents, SUN7I_A20_GMAC_PARENTS,
484 &mux->hw, &clk_mux_ops,
485 NULL, NULL,
486 &gate->hw, &clk_gate_ops,
487 0);
488
489 if (IS_ERR(clk))
490 goto iounmap_reg;
491
492 of_clk_add_provider(node, of_clk_src_simple_get, clk);
493 clk_register_clkdev(clk, clk_name, NULL);
494
495 return;
496
497iounmap_reg:
498 iounmap(reg);
499free_gate:
500 kfree(gate);
501free_mux:
502 kfree(mux);
503}
504CLK_OF_DECLARE(sun7i_a20_gmac, "allwinner,sun7i-a20-gmac-clk",
505 sun7i_a20_gmac_clk_setup);
506
507
508
509/**
380 * sunxi_factors_clk_setup() - Setup function for factor clocks 510 * sunxi_factors_clk_setup() - Setup function for factor clocks
381 */ 511 */
382 512
@@ -387,6 +517,7 @@ struct factors_data {
387 int mux; 517 int mux;
388 struct clk_factors_config *table; 518 struct clk_factors_config *table;
389 void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p); 519 void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
520 const char *name;
390}; 521};
391 522
392static struct clk_factors_config sun4i_pll1_config = { 523static struct clk_factors_config sun4i_pll1_config = {
@@ -416,6 +547,13 @@ static struct clk_factors_config sun4i_pll5_config = {
416 .kwidth = 2, 547 .kwidth = 2,
417}; 548};
418 549
550static struct clk_factors_config sun6i_a31_pll6_config = {
551 .nshift = 8,
552 .nwidth = 5,
553 .kshift = 4,
554 .kwidth = 2,
555};
556
419static struct clk_factors_config sun4i_apb1_config = { 557static struct clk_factors_config sun4i_apb1_config = {
420 .mshift = 0, 558 .mshift = 0,
421 .mwidth = 5, 559 .mwidth = 5,
@@ -451,10 +589,30 @@ static const struct factors_data sun6i_a31_pll1_data __initconst = {
451 .getter = sun6i_a31_get_pll1_factors, 589 .getter = sun6i_a31_get_pll1_factors,
452}; 590};
453 591
592static const struct factors_data sun7i_a20_pll4_data __initconst = {
593 .enable = 31,
594 .table = &sun4i_pll5_config,
595 .getter = sun4i_get_pll5_factors,
596};
597
454static const struct factors_data sun4i_pll5_data __initconst = { 598static const struct factors_data sun4i_pll5_data __initconst = {
455 .enable = 31, 599 .enable = 31,
456 .table = &sun4i_pll5_config, 600 .table = &sun4i_pll5_config,
457 .getter = sun4i_get_pll5_factors, 601 .getter = sun4i_get_pll5_factors,
602 .name = "pll5",
603};
604
605static const struct factors_data sun4i_pll6_data __initconst = {
606 .enable = 31,
607 .table = &sun4i_pll5_config,
608 .getter = sun4i_get_pll5_factors,
609 .name = "pll6",
610};
611
612static const struct factors_data sun6i_a31_pll6_data __initconst = {
613 .enable = 31,
614 .table = &sun6i_a31_pll6_config,
615 .getter = sun6i_a31_get_pll6_factors,
458}; 616};
459 617
460static const struct factors_data sun4i_apb1_data __initconst = { 618static const struct factors_data sun4i_apb1_data __initconst = {
@@ -497,14 +655,14 @@ static struct clk * __init sunxi_factors_clk_setup(struct device_node *node,
497 (parents[i] = of_clk_get_parent_name(node, i)) != NULL) 655 (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
498 i++; 656 i++;
499 657
500 /* Nodes should be providing the name via clock-output-names 658 /*
501 * but originally our dts didn't, and so we used node->name. 659 * some factor clocks, such as pll5 and pll6, may have multiple
502 * The new, better nodes look like clk@deadbeef, so we pull the 660 * outputs, and have their name designated in factors_data
503 * name just in this case */ 661 */
504 if (!strcmp("clk", clk_name)) { 662 if (data->name)
505 of_property_read_string_index(node, "clock-output-names", 663 clk_name = data->name;
506 0, &clk_name); 664 else
507 } 665 of_property_read_string(node, "clock-output-names", &clk_name);
508 666
509 factors = kzalloc(sizeof(struct clk_factors), GFP_KERNEL); 667 factors = kzalloc(sizeof(struct clk_factors), GFP_KERNEL);
510 if (!factors) 668 if (!factors)
@@ -601,6 +759,8 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
601 (parents[i] = of_clk_get_parent_name(node, i)) != NULL) 759 (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
602 i++; 760 i++;
603 761
762 of_property_read_string(node, "clock-output-names", &clk_name);
763
604 clk = clk_register_mux(NULL, clk_name, parents, i, 764 clk = clk_register_mux(NULL, clk_name, parents, i,
605 CLK_SET_RATE_NO_REPARENT, reg, 765 CLK_SET_RATE_NO_REPARENT, reg,
606 data->shift, SUNXI_MUX_GATE_WIDTH, 766 data->shift, SUNXI_MUX_GATE_WIDTH,
@@ -660,6 +820,8 @@ static void __init sunxi_divider_clk_setup(struct device_node *node,
660 820
661 clk_parent = of_clk_get_parent_name(node, 0); 821 clk_parent = of_clk_get_parent_name(node, 0);
662 822
823 of_property_read_string(node, "clock-output-names", &clk_name);
824
663 clk = clk_register_divider(NULL, clk_name, clk_parent, 0, 825 clk = clk_register_divider(NULL, clk_name, clk_parent, 0,
664 reg, data->shift, data->width, 826 reg, data->shift, data->width,
665 data->pow ? CLK_DIVIDER_POWER_OF_TWO : 0, 827 data->pow ? CLK_DIVIDER_POWER_OF_TWO : 0,
@@ -673,6 +835,59 @@ static void __init sunxi_divider_clk_setup(struct device_node *node,
673 835
674 836
675/** 837/**
838 * sunxi_gates_reset... - reset bits in leaf gate clk registers handling
839 */
840
841struct gates_reset_data {
842 void __iomem *reg;
843 spinlock_t *lock;
844 struct reset_controller_dev rcdev;
845};
846
847static int sunxi_gates_reset_assert(struct reset_controller_dev *rcdev,
848 unsigned long id)
849{
850 struct gates_reset_data *data = container_of(rcdev,
851 struct gates_reset_data,
852 rcdev);
853 unsigned long flags;
854 u32 reg;
855
856 spin_lock_irqsave(data->lock, flags);
857
858 reg = readl(data->reg);
859 writel(reg & ~BIT(id), data->reg);
860
861 spin_unlock_irqrestore(data->lock, flags);
862
863 return 0;
864}
865
866static int sunxi_gates_reset_deassert(struct reset_controller_dev *rcdev,
867 unsigned long id)
868{
869 struct gates_reset_data *data = container_of(rcdev,
870 struct gates_reset_data,
871 rcdev);
872 unsigned long flags;
873 u32 reg;
874
875 spin_lock_irqsave(data->lock, flags);
876
877 reg = readl(data->reg);
878 writel(reg | BIT(id), data->reg);
879
880 spin_unlock_irqrestore(data->lock, flags);
881
882 return 0;
883}
884
885static struct reset_control_ops sunxi_gates_reset_ops = {
886 .assert = sunxi_gates_reset_assert,
887 .deassert = sunxi_gates_reset_deassert,
888};
889
890/**
676 * sunxi_gates_clk_setup() - Setup function for leaf gates on clocks 891 * sunxi_gates_clk_setup() - Setup function for leaf gates on clocks
677 */ 892 */
678 893
@@ -680,6 +895,7 @@ static void __init sunxi_divider_clk_setup(struct device_node *node,
680 895
681struct gates_data { 896struct gates_data {
682 DECLARE_BITMAP(mask, SUNXI_GATES_MAX_SIZE); 897 DECLARE_BITMAP(mask, SUNXI_GATES_MAX_SIZE);
898 u32 reset_mask;
683}; 899};
684 900
685static const struct gates_data sun4i_axi_gates_data __initconst = { 901static const struct gates_data sun4i_axi_gates_data __initconst = {
@@ -746,10 +962,21 @@ static const struct gates_data sun7i_a20_apb1_gates_data __initconst = {
746 .mask = { 0xff80ff }, 962 .mask = { 0xff80ff },
747}; 963};
748 964
965static const struct gates_data sun4i_a10_usb_gates_data __initconst = {
966 .mask = {0x1C0},
967 .reset_mask = 0x07,
968};
969
970static const struct gates_data sun5i_a13_usb_gates_data __initconst = {
971 .mask = {0x140},
972 .reset_mask = 0x03,
973};
974
749static void __init sunxi_gates_clk_setup(struct device_node *node, 975static void __init sunxi_gates_clk_setup(struct device_node *node,
750 struct gates_data *data) 976 struct gates_data *data)
751{ 977{
752 struct clk_onecell_data *clk_data; 978 struct clk_onecell_data *clk_data;
979 struct gates_reset_data *reset_data;
753 const char *clk_parent; 980 const char *clk_parent;
754 const char *clk_name; 981 const char *clk_name;
755 void *reg; 982 void *reg;
@@ -793,6 +1020,21 @@ static void __init sunxi_gates_clk_setup(struct device_node *node,
793 clk_data->clk_num = i; 1020 clk_data->clk_num = i;
794 1021
795 of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); 1022 of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
1023
1024 /* Register a reset controller for gates with reset bits */
1025 if (data->reset_mask == 0)
1026 return;
1027
1028 reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
1029 if (!reset_data)
1030 return;
1031
1032 reset_data->reg = reg;
1033 reset_data->lock = &clk_lock;
1034 reset_data->rcdev.nr_resets = __fls(data->reset_mask) + 1;
1035 reset_data->rcdev.ops = &sunxi_gates_reset_ops;
1036 reset_data->rcdev.of_node = node;
1037 reset_controller_register(&reset_data->rcdev);
796} 1038}
797 1039
798 1040
@@ -832,7 +1074,7 @@ static const struct divs_data pll5_divs_data __initconst = {
832}; 1074};
833 1075
834static const struct divs_data pll6_divs_data __initconst = { 1076static const struct divs_data pll6_divs_data __initconst = {
835 .factors = &sun4i_pll5_data, 1077 .factors = &sun4i_pll6_data,
836 .div = { 1078 .div = {
837 { .shift = 0, .table = pll6_sata_tbl, .gate = 14 }, /* M, SATA */ 1079 { .shift = 0, .table = pll6_sata_tbl, .gate = 14 }, /* M, SATA */
838 { .fixed = 2 }, /* P, other */ 1080 { .fixed = 2 }, /* P, other */
@@ -854,7 +1096,7 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
854 struct divs_data *data) 1096 struct divs_data *data)
855{ 1097{
856 struct clk_onecell_data *clk_data; 1098 struct clk_onecell_data *clk_data;
857 const char *parent = node->name; 1099 const char *parent;
858 const char *clk_name; 1100 const char *clk_name;
859 struct clk **clks, *pclk; 1101 struct clk **clks, *pclk;
860 struct clk_hw *gate_hw, *rate_hw; 1102 struct clk_hw *gate_hw, *rate_hw;
@@ -868,6 +1110,7 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
868 1110
869 /* Set up factor clock that we will be dividing */ 1111 /* Set up factor clock that we will be dividing */
870 pclk = sunxi_factors_clk_setup(node, data->factors); 1112 pclk = sunxi_factors_clk_setup(node, data->factors);
1113 parent = __clk_get_name(pclk);
871 1114
872 reg = of_iomap(node, 0); 1115 reg = of_iomap(node, 0);
873 1116
@@ -970,56 +1213,60 @@ free_clkdata:
970 1213
971/* Matches for factors clocks */ 1214/* Matches for factors clocks */
972static const struct of_device_id clk_factors_match[] __initconst = { 1215static const struct of_device_id clk_factors_match[] __initconst = {
973 {.compatible = "allwinner,sun4i-pll1-clk", .data = &sun4i_pll1_data,}, 1216 {.compatible = "allwinner,sun4i-a10-pll1-clk", .data = &sun4i_pll1_data,},
974 {.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,}, 1217 {.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,},
975 {.compatible = "allwinner,sun4i-apb1-clk", .data = &sun4i_apb1_data,}, 1218 {.compatible = "allwinner,sun7i-a20-pll4-clk", .data = &sun7i_a20_pll4_data,},
976 {.compatible = "allwinner,sun4i-mod0-clk", .data = &sun4i_mod0_data,}, 1219 {.compatible = "allwinner,sun6i-a31-pll6-clk", .data = &sun6i_a31_pll6_data,},
1220 {.compatible = "allwinner,sun4i-a10-apb1-clk", .data = &sun4i_apb1_data,},
1221 {.compatible = "allwinner,sun4i-a10-mod0-clk", .data = &sun4i_mod0_data,},
977 {.compatible = "allwinner,sun7i-a20-out-clk", .data = &sun7i_a20_out_data,}, 1222 {.compatible = "allwinner,sun7i-a20-out-clk", .data = &sun7i_a20_out_data,},
978 {} 1223 {}
979}; 1224};
980 1225
981/* Matches for divider clocks */ 1226/* Matches for divider clocks */
982static const struct of_device_id clk_div_match[] __initconst = { 1227static const struct of_device_id clk_div_match[] __initconst = {
983 {.compatible = "allwinner,sun4i-axi-clk", .data = &sun4i_axi_data,}, 1228 {.compatible = "allwinner,sun4i-a10-axi-clk", .data = &sun4i_axi_data,},
984 {.compatible = "allwinner,sun4i-ahb-clk", .data = &sun4i_ahb_data,}, 1229 {.compatible = "allwinner,sun4i-a10-ahb-clk", .data = &sun4i_ahb_data,},
985 {.compatible = "allwinner,sun4i-apb0-clk", .data = &sun4i_apb0_data,}, 1230 {.compatible = "allwinner,sun4i-a10-apb0-clk", .data = &sun4i_apb0_data,},
986 {.compatible = "allwinner,sun6i-a31-apb2-div-clk", .data = &sun6i_a31_apb2_div_data,}, 1231 {.compatible = "allwinner,sun6i-a31-apb2-div-clk", .data = &sun6i_a31_apb2_div_data,},
987 {} 1232 {}
988}; 1233};
989 1234
990/* Matches for divided outputs */ 1235/* Matches for divided outputs */
991static const struct of_device_id clk_divs_match[] __initconst = { 1236static const struct of_device_id clk_divs_match[] __initconst = {
992 {.compatible = "allwinner,sun4i-pll5-clk", .data = &pll5_divs_data,}, 1237 {.compatible = "allwinner,sun4i-a10-pll5-clk", .data = &pll5_divs_data,},
993 {.compatible = "allwinner,sun4i-pll6-clk", .data = &pll6_divs_data,}, 1238 {.compatible = "allwinner,sun4i-a10-pll6-clk", .data = &pll6_divs_data,},
994 {} 1239 {}
995}; 1240};
996 1241
997/* Matches for mux clocks */ 1242/* Matches for mux clocks */
998static const struct of_device_id clk_mux_match[] __initconst = { 1243static const struct of_device_id clk_mux_match[] __initconst = {
999 {.compatible = "allwinner,sun4i-cpu-clk", .data = &sun4i_cpu_mux_data,}, 1244 {.compatible = "allwinner,sun4i-a10-cpu-clk", .data = &sun4i_cpu_mux_data,},
1000 {.compatible = "allwinner,sun4i-apb1-mux-clk", .data = &sun4i_apb1_mux_data,}, 1245 {.compatible = "allwinner,sun4i-a10-apb1-mux-clk", .data = &sun4i_apb1_mux_data,},
1001 {.compatible = "allwinner,sun6i-a31-ahb1-mux-clk", .data = &sun6i_a31_ahb1_mux_data,}, 1246 {.compatible = "allwinner,sun6i-a31-ahb1-mux-clk", .data = &sun6i_a31_ahb1_mux_data,},
1002 {} 1247 {}
1003}; 1248};
1004 1249
1005/* Matches for gate clocks */ 1250/* Matches for gate clocks */
1006static const struct of_device_id clk_gates_match[] __initconst = { 1251static const struct of_device_id clk_gates_match[] __initconst = {
1007 {.compatible = "allwinner,sun4i-axi-gates-clk", .data = &sun4i_axi_gates_data,}, 1252 {.compatible = "allwinner,sun4i-a10-axi-gates-clk", .data = &sun4i_axi_gates_data,},
1008 {.compatible = "allwinner,sun4i-ahb-gates-clk", .data = &sun4i_ahb_gates_data,}, 1253 {.compatible = "allwinner,sun4i-a10-ahb-gates-clk", .data = &sun4i_ahb_gates_data,},
1009 {.compatible = "allwinner,sun5i-a10s-ahb-gates-clk", .data = &sun5i_a10s_ahb_gates_data,}, 1254 {.compatible = "allwinner,sun5i-a10s-ahb-gates-clk", .data = &sun5i_a10s_ahb_gates_data,},
1010 {.compatible = "allwinner,sun5i-a13-ahb-gates-clk", .data = &sun5i_a13_ahb_gates_data,}, 1255 {.compatible = "allwinner,sun5i-a13-ahb-gates-clk", .data = &sun5i_a13_ahb_gates_data,},
1011 {.compatible = "allwinner,sun6i-a31-ahb1-gates-clk", .data = &sun6i_a31_ahb1_gates_data,}, 1256 {.compatible = "allwinner,sun6i-a31-ahb1-gates-clk", .data = &sun6i_a31_ahb1_gates_data,},
1012 {.compatible = "allwinner,sun7i-a20-ahb-gates-clk", .data = &sun7i_a20_ahb_gates_data,}, 1257 {.compatible = "allwinner,sun7i-a20-ahb-gates-clk", .data = &sun7i_a20_ahb_gates_data,},
1013 {.compatible = "allwinner,sun4i-apb0-gates-clk", .data = &sun4i_apb0_gates_data,}, 1258 {.compatible = "allwinner,sun4i-a10-apb0-gates-clk", .data = &sun4i_apb0_gates_data,},
1014 {.compatible = "allwinner,sun5i-a10s-apb0-gates-clk", .data = &sun5i_a10s_apb0_gates_data,}, 1259 {.compatible = "allwinner,sun5i-a10s-apb0-gates-clk", .data = &sun5i_a10s_apb0_gates_data,},
1015 {.compatible = "allwinner,sun5i-a13-apb0-gates-clk", .data = &sun5i_a13_apb0_gates_data,}, 1260 {.compatible = "allwinner,sun5i-a13-apb0-gates-clk", .data = &sun5i_a13_apb0_gates_data,},
1016 {.compatible = "allwinner,sun7i-a20-apb0-gates-clk", .data = &sun7i_a20_apb0_gates_data,}, 1261 {.compatible = "allwinner,sun7i-a20-apb0-gates-clk", .data = &sun7i_a20_apb0_gates_data,},
1017 {.compatible = "allwinner,sun4i-apb1-gates-clk", .data = &sun4i_apb1_gates_data,}, 1262 {.compatible = "allwinner,sun4i-a10-apb1-gates-clk", .data = &sun4i_apb1_gates_data,},
1018 {.compatible = "allwinner,sun5i-a10s-apb1-gates-clk", .data = &sun5i_a10s_apb1_gates_data,}, 1263 {.compatible = "allwinner,sun5i-a10s-apb1-gates-clk", .data = &sun5i_a10s_apb1_gates_data,},
1019 {.compatible = "allwinner,sun5i-a13-apb1-gates-clk", .data = &sun5i_a13_apb1_gates_data,}, 1264 {.compatible = "allwinner,sun5i-a13-apb1-gates-clk", .data = &sun5i_a13_apb1_gates_data,},
1020 {.compatible = "allwinner,sun6i-a31-apb1-gates-clk", .data = &sun6i_a31_apb1_gates_data,}, 1265 {.compatible = "allwinner,sun6i-a31-apb1-gates-clk", .data = &sun6i_a31_apb1_gates_data,},
1021 {.compatible = "allwinner,sun7i-a20-apb1-gates-clk", .data = &sun7i_a20_apb1_gates_data,}, 1266 {.compatible = "allwinner,sun7i-a20-apb1-gates-clk", .data = &sun7i_a20_apb1_gates_data,},
1022 {.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,}, 1267 {.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,},
1268 {.compatible = "allwinner,sun4i-a10-usb-clk", .data = &sun4i_a10_usb_gates_data,},
1269 {.compatible = "allwinner,sun5i-a13-usb-clk", .data = &sun5i_a13_usb_gates_data,},
1023 {} 1270 {}
1024}; 1271};
1025 1272
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index 356e9b804421..9e899c18af86 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -130,7 +130,7 @@ static const struct clk_ops tegra_clk_periph_nodiv_ops = {
130 .disable = clk_periph_disable, 130 .disable = clk_periph_disable,
131}; 131};
132 132
133const struct clk_ops tegra_clk_periph_no_gate_ops = { 133static const struct clk_ops tegra_clk_periph_no_gate_ops = {
134 .get_parent = clk_periph_get_parent, 134 .get_parent = clk_periph_get_parent,
135 .set_parent = clk_periph_set_parent, 135 .set_parent = clk_periph_set_parent,
136 .recalc_rate = clk_periph_recalc_rate, 136 .recalc_rate = clk_periph_recalc_rate,
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index 776ee4594bd4..028b33783d38 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -34,7 +34,6 @@ static struct ti_dt_clk am33xx_clks[] = {
34 DT_CLK(NULL, "dpll_core_m5_ck", "dpll_core_m5_ck"), 34 DT_CLK(NULL, "dpll_core_m5_ck", "dpll_core_m5_ck"),
35 DT_CLK(NULL, "dpll_core_m6_ck", "dpll_core_m6_ck"), 35 DT_CLK(NULL, "dpll_core_m6_ck", "dpll_core_m6_ck"),
36 DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"), 36 DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"),
37 DT_CLK("cpu0", NULL, "dpll_mpu_ck"),
38 DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"), 37 DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"),
39 DT_CLK(NULL, "dpll_ddr_ck", "dpll_ddr_ck"), 38 DT_CLK(NULL, "dpll_ddr_ck", "dpll_ddr_ck"),
40 DT_CLK(NULL, "dpll_ddr_m2_ck", "dpll_ddr_m2_ck"), 39 DT_CLK(NULL, "dpll_ddr_m2_ck", "dpll_ddr_m2_ck"),
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index a15e445570b2..e6aa10db7bba 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -112,7 +112,7 @@ static unsigned long ti_clk_divider_recalc_rate(struct clk_hw *hw,
112 return parent_rate; 112 return parent_rate;
113 } 113 }
114 114
115 return parent_rate / div; 115 return DIV_ROUND_UP(parent_rate, div);
116} 116}
117 117
118/* 118/*
@@ -182,7 +182,7 @@ static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
182 } 182 }
183 parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 183 parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
184 MULT_ROUND_UP(rate, i)); 184 MULT_ROUND_UP(rate, i));
185 now = parent_rate / i; 185 now = DIV_ROUND_UP(parent_rate, i);
186 if (now <= rate && now > best) { 186 if (now <= rate && now > best) {
187 bestdiv = i; 187 bestdiv = i;
188 best = now; 188 best = now;
@@ -205,7 +205,7 @@ static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
205 int div; 205 int div;
206 div = ti_clk_divider_bestdiv(hw, rate, prate); 206 div = ti_clk_divider_bestdiv(hw, rate, prate);
207 207
208 return *prate / div; 208 return DIV_ROUND_UP(*prate, div);
209} 209}
210 210
211static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, 211static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -216,7 +216,7 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
216 unsigned long flags = 0; 216 unsigned long flags = 0;
217 u32 val; 217 u32 val;
218 218
219 div = parent_rate / rate; 219 div = DIV_ROUND_UP(parent_rate, rate);
220 value = _get_val(divider, div); 220 value = _get_val(divider, div);
221 221
222 if (value > div_mask(divider)) 222 if (value > div_mask(divider))
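The switch from plain division to DIV_ROUND_UP() makes the rounding consistent across recalc_rate(), round_rate() and set_rate() in the TI divider: programming the rate returned by round_rate() selects the same divider, and the resulting hardware rate never exceeds the request when parent_rate is not an exact multiple. A standalone sketch of the arithmetic (the macro mirrors DIV_ROUND_UP from include/linux/kernel.h):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long parent = 48000000UL;	/* 48 MHz parent */
		unsigned long target = 10000000UL;	/* 10 MHz request */

		unsigned long trunc = parent / target;			/* 4 -> 12 MHz, above the request */
		unsigned long up = DIV_ROUND_UP(parent, target);	/* 5 -> 9.6 MHz, never above it */

		printf("truncated: div=%lu -> %lu Hz\n", trunc, parent / trunc);
		printf("round-up:  div=%lu -> %lu Hz\n", up, parent / up);
		return 0;
	}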
diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c
index cdeff299de26..7b55ef89baa5 100644
--- a/drivers/clk/ux500/u8500_of_clk.c
+++ b/drivers/clk/ux500/u8500_of_clk.c
@@ -29,7 +29,8 @@ static struct clk *prcc_kclk[(PRCC_NUM_PERIPH_CLUSTERS + 1) * PRCC_PERIPHS_PER_C
29#define PRCC_KCLK_STORE(clk, base, bit) \ 29#define PRCC_KCLK_STORE(clk, base, bit) \
30 prcc_kclk[(base * PRCC_PERIPHS_PER_CLUSTER) + bit] = clk 30 prcc_kclk[(base * PRCC_PERIPHS_PER_CLUSTER) + bit] = clk
31 31
32struct clk *ux500_twocell_get(struct of_phandle_args *clkspec, void *data) 32static struct clk *ux500_twocell_get(struct of_phandle_args *clkspec,
33 void *data)
33{ 34{
34 struct clk **clk_data = data; 35 struct clk **clk_data = data;
35 unsigned int base, bit; 36 unsigned int base, bit;
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index c812b93a52b2..52c09afdcfb7 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -149,7 +149,7 @@ static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
149 clks[fclk] = clk_register_gate(NULL, clk_name, 149 clks[fclk] = clk_register_gate(NULL, clk_name,
150 div1_name, CLK_SET_RATE_PARENT, fclk_gate_reg, 150 div1_name, CLK_SET_RATE_PARENT, fclk_gate_reg,
151 0, CLK_GATE_SET_TO_DISABLE, fclk_gate_lock); 151 0, CLK_GATE_SET_TO_DISABLE, fclk_gate_lock);
152 enable_reg = readl(fclk_gate_reg) & 1; 152 enable_reg = clk_readl(fclk_gate_reg) & 1;
153 if (enable && !enable_reg) { 153 if (enable && !enable_reg) {
154 if (clk_prepare_enable(clks[fclk])) 154 if (clk_prepare_enable(clks[fclk]))
155 pr_warn("%s: FCLK%u enable failed\n", __func__, 155 pr_warn("%s: FCLK%u enable failed\n", __func__,
@@ -278,7 +278,7 @@ static void __init zynq_clk_setup(struct device_node *np)
278 SLCR_IOPLL_CTRL, 4, 1, 0, &iopll_lock); 278 SLCR_IOPLL_CTRL, 4, 1, 0, &iopll_lock);
279 279
280 /* CPU clocks */ 280 /* CPU clocks */
281 tmp = readl(SLCR_621_TRUE) & 1; 281 tmp = clk_readl(SLCR_621_TRUE) & 1;
282 clk = clk_register_mux(NULL, "cpu_mux", cpu_parents, 4, 282 clk = clk_register_mux(NULL, "cpu_mux", cpu_parents, 4,
283 CLK_SET_RATE_NO_REPARENT, SLCR_ARM_CLK_CTRL, 4, 2, 0, 283 CLK_SET_RATE_NO_REPARENT, SLCR_ARM_CLK_CTRL, 4, 2, 0,
284 &armclk_lock); 284 &armclk_lock);
diff --git a/drivers/clk/zynq/pll.c b/drivers/clk/zynq/pll.c
index 3226f54fa595..cec97596fe65 100644
--- a/drivers/clk/zynq/pll.c
+++ b/drivers/clk/zynq/pll.c
@@ -90,7 +90,7 @@ static unsigned long zynq_pll_recalc_rate(struct clk_hw *hw,
 90 * probably makes sense to redundantly save fbdiv in the struct 90 * probably makes sense to redundantly save fbdiv in the struct
91 * zynq_pll to save the IO access. 91 * zynq_pll to save the IO access.
92 */ 92 */
93 fbdiv = (readl(clk->pll_ctrl) & PLLCTRL_FBDIV_MASK) >> 93 fbdiv = (clk_readl(clk->pll_ctrl) & PLLCTRL_FBDIV_MASK) >>
94 PLLCTRL_FBDIV_SHIFT; 94 PLLCTRL_FBDIV_SHIFT;
95 95
96 return parent_rate * fbdiv; 96 return parent_rate * fbdiv;
@@ -112,7 +112,7 @@ static int zynq_pll_is_enabled(struct clk_hw *hw)
112 112
113 spin_lock_irqsave(clk->lock, flags); 113 spin_lock_irqsave(clk->lock, flags);
114 114
115 reg = readl(clk->pll_ctrl); 115 reg = clk_readl(clk->pll_ctrl);
116 116
117 spin_unlock_irqrestore(clk->lock, flags); 117 spin_unlock_irqrestore(clk->lock, flags);
118 118
@@ -138,10 +138,10 @@ static int zynq_pll_enable(struct clk_hw *hw)
138 /* Power up PLL and wait for lock */ 138 /* Power up PLL and wait for lock */
139 spin_lock_irqsave(clk->lock, flags); 139 spin_lock_irqsave(clk->lock, flags);
140 140
141 reg = readl(clk->pll_ctrl); 141 reg = clk_readl(clk->pll_ctrl);
142 reg &= ~(PLLCTRL_RESET_MASK | PLLCTRL_PWRDWN_MASK); 142 reg &= ~(PLLCTRL_RESET_MASK | PLLCTRL_PWRDWN_MASK);
143 writel(reg, clk->pll_ctrl); 143 clk_writel(reg, clk->pll_ctrl);
144 while (!(readl(clk->pll_status) & (1 << clk->lockbit))) 144 while (!(clk_readl(clk->pll_status) & (1 << clk->lockbit)))
145 ; 145 ;
146 146
147 spin_unlock_irqrestore(clk->lock, flags); 147 spin_unlock_irqrestore(clk->lock, flags);
@@ -168,9 +168,9 @@ static void zynq_pll_disable(struct clk_hw *hw)
168 /* shut down PLL */ 168 /* shut down PLL */
169 spin_lock_irqsave(clk->lock, flags); 169 spin_lock_irqsave(clk->lock, flags);
170 170
171 reg = readl(clk->pll_ctrl); 171 reg = clk_readl(clk->pll_ctrl);
172 reg |= PLLCTRL_RESET_MASK | PLLCTRL_PWRDWN_MASK; 172 reg |= PLLCTRL_RESET_MASK | PLLCTRL_PWRDWN_MASK;
173 writel(reg, clk->pll_ctrl); 173 clk_writel(reg, clk->pll_ctrl);
174 174
175 spin_unlock_irqrestore(clk->lock, flags); 175 spin_unlock_irqrestore(clk->lock, flags);
176} 176}
@@ -225,9 +225,9 @@ struct clk *clk_register_zynq_pll(const char *name, const char *parent,
225 225
226 spin_lock_irqsave(pll->lock, flags); 226 spin_lock_irqsave(pll->lock, flags);
227 227
228 reg = readl(pll->pll_ctrl); 228 reg = clk_readl(pll->pll_ctrl);
229 reg &= ~PLLCTRL_BPQUAL_MASK; 229 reg &= ~PLLCTRL_BPQUAL_MASK;
230 writel(reg, pll->pll_ctrl); 230 clk_writel(reg, pll->pll_ctrl);
231 231
232 spin_unlock_irqrestore(pll->lock, flags); 232 spin_unlock_irqrestore(pll->lock, flags);
233 233
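The Zynq conversions here (and in zynq/clkc.c above) replace raw readl()/writel() with the common clock framework's clk_readl()/clk_writel() accessors from include/linux/clk-provider.h. Roughly, these are thin wrappers the framework can specialize per platform, for example for big-endian register blocks; a sketch of the default shape, not the authoritative definition:

	/* default case: plain little-endian MMIO, assumed to match clk-provider.h */
	static inline u32 clk_readl(u32 __iomem *reg)
	{
		return readl(reg);
	}

	static inline void clk_writel(u32 val, u32 __iomem *reg)
	{
		writel(val, reg);
	}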