author		Loc Ho <lho@apm.com>	2013-06-26 13:56:09 -0400
committer	Mike Turquette <mturquette@linaro.org>	2013-10-07 14:22:15 -0400
commit		308964caeebc45eb7723c87818076f61fa1a2e1b (patch)
tree		39fd02e37afe9aab1d5e4e0b60cd8a68a4737385
parent		92947789815f9ec808ad3de177af99a5d02ef6b2 (diff)
clk: Add APM X-Gene SoC clock driver
clk: Add APM X-Gene SoC clock driver for reference, PLL, and device clocks.

Signed-off-by: Loc Ho <lho@apm.com>
Signed-off-by: Kumar Sankaran <ksankaran@apm.com>
Signed-off-by: Vinayak Kale <vkale@apm.com>
Signed-off-by: Feng Kan <fkan@apm.com>
Signed-off-by: Mike Turquette <mturquette@linaro.org>
-rw-r--r--	drivers/clk/Kconfig	7
-rw-r--r--	drivers/clk/Makefile	1
-rw-r--r--	drivers/clk/clk-xgene.c	521
3 files changed, 529 insertions(+), 0 deletions(-)
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 279407a36391..dd37f91289d2 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -93,6 +93,13 @@ config CLK_PPC_CORENET
 	  This adds the clock driver support for Freescale PowerPC corenet
 	  platforms using common clock framework.
 
+config COMMON_CLK_XGENE
+	bool "Clock driver for APM XGene SoC"
+	default y
+	depends on ARM64
+	---help---
+	  Support for the APM X-Gene SoC reference, PLL, and device clocks.
+
 endmenu
 
 source "drivers/clk/mvebu/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 7b111062ccba..270f3fd2fbc0 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
 obj-$(CONFIG_ARCH_ZYNQ)		+= zynq/
 obj-$(CONFIG_ARCH_TEGRA)	+= tegra/
 obj-$(CONFIG_PLAT_SAMSUNG)	+= samsung/
+obj-$(CONFIG_COMMON_CLK_XGENE)	+= clk-xgene.o
 
 obj-$(CONFIG_X86)		+= x86/
 
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
new file mode 100644
index 000000000000..dd8a62d8f11f
--- /dev/null
+++ b/drivers/clk/clk-xgene.c
@@ -0,0 +1,521 @@
/*
 * clk-xgene.c - AppliedMicro X-Gene Clock Interface
 *
 * Copyright (c) 2013, Applied Micro Circuits Corporation
 * Author: Loc Ho <lho@apm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 *
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <asm/setup.h>

/* Register SCU_PCPPLL bit fields */
#define N_DIV_RD(src)			(((src) & 0x000001ff))

/* Register SCU_SOCPLL bit fields */
#define CLKR_RD(src)			(((src) & 0x07000000)>>24)
#define CLKOD_RD(src)			(((src) & 0x00300000)>>20)
#define REGSPEC_RESET_F1_MASK		0x00010000
#define CLKF_RD(src)			(((src) & 0x000001ff))

#define XGENE_CLK_DRIVER_VER		"0.1"

static DEFINE_SPINLOCK(clk_lock);

static inline u32 xgene_clk_read(void *csr)
{
	return readl_relaxed(csr);
}

static inline void xgene_clk_write(u32 data, void *csr)
{
	return writel_relaxed(data, csr);
}

/* PLL Clock */
enum xgene_pll_type {
	PLL_TYPE_PCP = 0,
	PLL_TYPE_SOC = 1,
};

struct xgene_clk_pll {
	struct clk_hw	hw;
	const char	*name;
	void __iomem	*reg;
	spinlock_t	*lock;
	u32		pll_offset;
	enum xgene_pll_type	type;
};

#define to_xgene_clk_pll(_hw) container_of(_hw, struct xgene_clk_pll, hw)

static int xgene_clk_pll_is_enabled(struct clk_hw *hw)
{
	struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
	u32 data;

	data = xgene_clk_read(pllclk->reg + pllclk->pll_offset);
	pr_debug("%s pll %s\n", pllclk->name,
		data & REGSPEC_RESET_F1_MASK ? "disabled" : "enabled");

	return data & REGSPEC_RESET_F1_MASK ? 0 : 1;
}

static unsigned long xgene_clk_pll_recalc_rate(struct clk_hw *hw,
				unsigned long parent_rate)
{
	struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
	unsigned long fref;
	unsigned long fvco;
	u32 pll;
	u32 nref;
	u32 nout;
	u32 nfb;

	pll = xgene_clk_read(pllclk->reg + pllclk->pll_offset);

	if (pllclk->type == PLL_TYPE_PCP) {
		/*
		 * PLL VCO = Reference clock * NF
		 * PCP PLL = PLL_VCO / 2
		 */
		nout = 2;
		fvco = parent_rate * (N_DIV_RD(pll) + 4);
	} else {
		/*
		 * Fref = Reference Clock / NREF;
		 * Fvco = Fref * NFB;
		 * Fout = Fvco / NOUT;
		 */
		nref = CLKR_RD(pll) + 1;
		nout = CLKOD_RD(pll) + 1;
		nfb = CLKF_RD(pll);
		fref = parent_rate / nref;
		fvco = fref * nfb;
	}
	pr_debug("%s pll recalc rate %ld parent %ld\n", pllclk->name,
		fvco / nout, parent_rate);

	return fvco / nout;
}
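
/*
 * Worked example of the SOC PLL formula above; the register values here
 * are purely illustrative, not taken from real hardware.  With a 100 MHz
 * reference clock and a SOC PLL register reading of CLKR = 0, CLKOD = 1,
 * CLKF = 100, the code computes NREF = 1, NOUT = 2, NFB = 100, hence
 * Fref = 100 MHz, Fvco = 10 GHz and the returned rate is 5 GHz.
 */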

const struct clk_ops xgene_clk_pll_ops = {
	.is_enabled = xgene_clk_pll_is_enabled,
	.recalc_rate = xgene_clk_pll_recalc_rate,
};

static struct clk *xgene_register_clk_pll(struct device *dev,
		const char *name, const char *parent_name,
		unsigned long flags, void __iomem *reg, u32 pll_offset,
		u32 type, spinlock_t *lock)
{
	struct xgene_clk_pll *apmclk;
	struct clk *clk;
	struct clk_init_data init;

	/* allocate the APM clock structure */
	apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
	if (!apmclk) {
		pr_err("%s: could not allocate APM clk\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	init.name = name;
	init.ops = &xgene_clk_pll_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;

	apmclk->name = name;
	apmclk->reg = reg;
	apmclk->lock = lock;
	apmclk->pll_offset = pll_offset;
	apmclk->type = type;
	apmclk->hw.init = &init;

	/* Register the clock */
	clk = clk_register(dev, &apmclk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: could not register clk %s\n", __func__, name);
		kfree(apmclk);
		return NULL;
	}
	return clk;
}

static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_type)
{
	const char *clk_name = np->full_name;
	struct clk *clk;
	void *reg;

	reg = of_iomap(np, 0);
	if (reg == NULL) {
		pr_err("Unable to map CSR register for %s\n", np->full_name);
		return;
	}
	of_property_read_string(np, "clock-output-names", &clk_name);
	clk = xgene_register_clk_pll(NULL,
			clk_name, of_clk_get_parent_name(np, 0),
			CLK_IS_ROOT, reg, 0, pll_type, &clk_lock);
	if (!IS_ERR(clk)) {
		of_clk_add_provider(np, of_clk_src_simple_get, clk);
		clk_register_clkdev(clk, clk_name, NULL);
		pr_debug("Add %s clock PLL\n", clk_name);
	}
}

static void xgene_socpllclk_init(struct device_node *np)
{
	xgene_pllclk_init(np, PLL_TYPE_SOC);
}

static void xgene_pcppllclk_init(struct device_node *np)
{
	xgene_pllclk_init(np, PLL_TYPE_PCP);
}

/* IP Clock */
struct xgene_dev_parameters {
	void __iomem *csr_reg;		/* CSR for IP clock */
	u32 reg_clk_offset;		/* Offset to clock enable CSR */
	u32 reg_clk_mask;		/* Mask bit for clock enable */
	u32 reg_csr_offset;		/* Offset to CSR reset */
	u32 reg_csr_mask;		/* Mask bit for disable CSR reset */
	void __iomem *divider_reg;	/* CSR for divider */
	u32 reg_divider_offset;		/* Offset to divider register */
	u32 reg_divider_shift;		/* Bit shift to divider field */
	u32 reg_divider_width;		/* Width of the bit to divider field */
};

struct xgene_clk {
	struct clk_hw	hw;
	const char	*name;
	spinlock_t	*lock;
	struct xgene_dev_parameters	param;
};

#define to_xgene_clk(_hw) container_of(_hw, struct xgene_clk, hw)

static int xgene_clk_enable(struct clk_hw *hw)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	unsigned long flags = 0;
	u32 data;

	if (pclk->lock)
		spin_lock_irqsave(pclk->lock, flags);

	if (pclk->param.csr_reg != NULL) {
		pr_debug("%s clock enabled\n", pclk->name);
		/* First enable the clock */
		data = xgene_clk_read(pclk->param.csr_reg +
					pclk->param.reg_clk_offset);
		data |= pclk->param.reg_clk_mask;
		xgene_clk_write(data, pclk->param.csr_reg +
					pclk->param.reg_clk_offset);
		pr_debug("%s clock PADDR base 0x%016LX clk offset 0x%08X mask 0x%08X value 0x%08X\n",
			pclk->name, __pa(pclk->param.csr_reg),
			pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
			data);

		/* Second enable the CSR */
		data = xgene_clk_read(pclk->param.csr_reg +
					pclk->param.reg_csr_offset);
		data &= ~pclk->param.reg_csr_mask;
		xgene_clk_write(data, pclk->param.csr_reg +
					pclk->param.reg_csr_offset);
		pr_debug("%s CSR RESET PADDR base 0x%016LX csr offset 0x%08X mask 0x%08X value 0x%08X\n",
			pclk->name, __pa(pclk->param.csr_reg),
			pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
			data);
	}

	if (pclk->lock)
		spin_unlock_irqrestore(pclk->lock, flags);

	return 0;
}

static void xgene_clk_disable(struct clk_hw *hw)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	unsigned long flags = 0;
	u32 data;

	if (pclk->lock)
		spin_lock_irqsave(pclk->lock, flags);

	if (pclk->param.csr_reg != NULL) {
		pr_debug("%s clock disabled\n", pclk->name);
		/* First put the CSR in reset */
		data = xgene_clk_read(pclk->param.csr_reg +
					pclk->param.reg_csr_offset);
		data |= pclk->param.reg_csr_mask;
		xgene_clk_write(data, pclk->param.csr_reg +
					pclk->param.reg_csr_offset);

		/* Second disable the clock */
		data = xgene_clk_read(pclk->param.csr_reg +
					pclk->param.reg_clk_offset);
		data &= ~pclk->param.reg_clk_mask;
		xgene_clk_write(data, pclk->param.csr_reg +
					pclk->param.reg_clk_offset);
	}

	if (pclk->lock)
		spin_unlock_irqrestore(pclk->lock, flags);
}

static int xgene_clk_is_enabled(struct clk_hw *hw)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	u32 data = 0;

	if (pclk->param.csr_reg != NULL) {
		pr_debug("%s clock checking\n", pclk->name);
		data = xgene_clk_read(pclk->param.csr_reg +
					pclk->param.reg_clk_offset);
		pr_debug("%s clock is %s\n", pclk->name,
			data & pclk->param.reg_clk_mask ? "enabled" :
							"disabled");
	}

	if (pclk->param.csr_reg == NULL)
		return 1;
	return data & pclk->param.reg_clk_mask ? 1 : 0;
}

static unsigned long xgene_clk_recalc_rate(struct clk_hw *hw,
				unsigned long parent_rate)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	u32 data;

	if (pclk->param.divider_reg) {
		data = xgene_clk_read(pclk->param.divider_reg +
					pclk->param.reg_divider_offset);
		data >>= pclk->param.reg_divider_shift;
		data &= (1 << pclk->param.reg_divider_width) - 1;

		pr_debug("%s clock recalc rate %ld parent %ld\n",
			pclk->name, parent_rate / data, parent_rate);
		return parent_rate / data;
	} else {
		pr_debug("%s clock recalc rate %ld parent %ld\n",
			pclk->name, parent_rate, parent_rate);
		return parent_rate;
	}
}

static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	unsigned long flags = 0;
	u32 data;
	u32 divider;
	u32 divider_save;

	if (pclk->lock)
		spin_lock_irqsave(pclk->lock, flags);

	if (pclk->param.divider_reg) {
		/* Let's compute the divider */
		if (rate > parent_rate)
			rate = parent_rate;
		divider_save = divider = parent_rate / rate; /* Rounded down */
		divider &= (1 << pclk->param.reg_divider_width) - 1;
		divider <<= pclk->param.reg_divider_shift;

		/* Set new divider */
		data = xgene_clk_read(pclk->param.divider_reg +
				pclk->param.reg_divider_offset);
		data &= ~((1 << pclk->param.reg_divider_width) - 1);
		data |= divider;
		xgene_clk_write(data, pclk->param.divider_reg +
					pclk->param.reg_divider_offset);
		pr_debug("%s clock set rate %ld\n", pclk->name,
			parent_rate / divider_save);
	} else {
		divider_save = 1;
	}

	if (pclk->lock)
		spin_unlock_irqrestore(pclk->lock, flags);

	return parent_rate / divider_save;
}

static long xgene_clk_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *prate)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	unsigned long parent_rate = *prate;
	u32 divider;

	if (pclk->param.divider_reg) {
		/* Let's compute the divider */
		if (rate > parent_rate)
			rate = parent_rate;
		divider = parent_rate / rate; /* Rounded down */
	} else {
		divider = 1;
	}

	return parent_rate / divider;
}
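
/*
 * Worked example for the divider path above; the numbers are illustrative
 * only.  With a 500 MHz parent, a requested rate of 120 MHz and a 3-bit
 * divider field, parent_rate / rate rounds down to a divider of 4, so
 * both round_rate and set_rate settle on 125 MHz.
 */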

const struct clk_ops xgene_clk_ops = {
	.enable = xgene_clk_enable,
	.disable = xgene_clk_disable,
	.is_enabled = xgene_clk_is_enabled,
	.recalc_rate = xgene_clk_recalc_rate,
	.set_rate = xgene_clk_set_rate,
	.round_rate = xgene_clk_round_rate,
};

static struct clk *xgene_register_clk(struct device *dev,
		const char *name, const char *parent_name,
		struct xgene_dev_parameters *parameters, spinlock_t *lock)
{
	struct xgene_clk *apmclk;
	struct clk *clk;
	struct clk_init_data init;
	int rc;

	/* allocate the APM clock structure */
	apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
	if (!apmclk) {
		pr_err("%s: could not allocate APM clk\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	init.name = name;
	init.ops = &xgene_clk_ops;
	init.flags = 0;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;

	apmclk->name = name;
	apmclk->lock = lock;
	apmclk->hw.init = &init;
	apmclk->param = *parameters;

	/* Register the clock */
	clk = clk_register(dev, &apmclk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: could not register clk %s\n", __func__, name);
		kfree(apmclk);
		return clk;
	}

	/* Register the clock for lookup */
	rc = clk_register_clkdev(clk, name, NULL);
	if (rc != 0) {
		pr_err("%s: could not register lookup clk %s\n",
			__func__, name);
	}
	return clk;
}

static void __init xgene_devclk_init(struct device_node *np)
{
	const char *clk_name = np->full_name;
	struct clk *clk;
	struct resource res;
	int rc;
	struct xgene_dev_parameters parameters;
	int i;

	/* Check if the entry is disabled */
	if (!of_device_is_available(np))
		return;

	/* Parse the DTS register for resource */
	parameters.csr_reg = NULL;
	parameters.divider_reg = NULL;
	for (i = 0; i < 2; i++) {
		void *map_res;
		rc = of_address_to_resource(np, i, &res);
		if (rc != 0) {
			if (i == 0) {
				pr_err("no DTS register for %s\n",
					np->full_name);
				return;
			}
			break;
		}
		map_res = of_iomap(np, i);
		if (map_res == NULL) {
			pr_err("Unable to map resource %d for %s\n",
				i, np->full_name);
			goto err;
		}
		if (strcmp(res.name, "div-reg") == 0)
			parameters.divider_reg = map_res;
		else /* if (strcmp(res->name, "csr-reg") == 0) */
			parameters.csr_reg = map_res;
	}
	if (of_property_read_u32(np, "csr-offset", &parameters.reg_csr_offset))
		parameters.reg_csr_offset = 0;
	if (of_property_read_u32(np, "csr-mask", &parameters.reg_csr_mask))
		parameters.reg_csr_mask = 0xF;
	if (of_property_read_u32(np, "enable-offset",
				&parameters.reg_clk_offset))
		parameters.reg_clk_offset = 0x8;
	if (of_property_read_u32(np, "enable-mask", &parameters.reg_clk_mask))
		parameters.reg_clk_mask = 0xF;
	if (of_property_read_u32(np, "divider-offset",
				&parameters.reg_divider_offset))
		parameters.reg_divider_offset = 0;
	if (of_property_read_u32(np, "divider-width",
				&parameters.reg_divider_width))
		parameters.reg_divider_width = 0;
	if (of_property_read_u32(np, "divider-shift",
				&parameters.reg_divider_shift))
		parameters.reg_divider_shift = 0;
	of_property_read_string(np, "clock-output-names", &clk_name);

	clk = xgene_register_clk(NULL, clk_name,
		of_clk_get_parent_name(np, 0), &parameters, &clk_lock);
	if (IS_ERR(clk))
		goto err;
	pr_debug("Add %s clock\n", clk_name);
	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (rc != 0)
		pr_err("%s: could not register provider clk %s\n", __func__,
			np->full_name);

	return;

err:
	if (parameters.csr_reg)
		iounmap(parameters.csr_reg);
	if (parameters.divider_reg)
		iounmap(parameters.divider_reg);
}

CLK_OF_DECLARE(xgene_socpll_clock, "apm,xgene-socpll-clock", xgene_socpllclk_init);
CLK_OF_DECLARE(xgene_pcppll_clock, "apm,xgene-pcppll-clock", xgene_pcppllclk_init);
CLK_OF_DECLARE(xgene_dev_clock, "apm,xgene-device-clock", xgene_devclk_init);
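
For context, here is a minimal consumer sketch (not part of this patch) showing how a platform driver would pick up one of the device clocks registered by xgene_devclk_init() through the standard clk API; the probe function and the "sata01clk" lookup name are hypothetical, standing in for whatever clock-output-names value the device tree provides.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int rc;

	/* "sata01clk" is an assumed clock-output-names entry, for illustration */
	clk = clk_get(&pdev->dev, "sata01clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* clk_prepare_enable() ends up in xgene_clk_enable() above */
	rc = clk_prepare_enable(clk);
	if (rc) {
		clk_put(clk);
		return rc;
	}

	dev_info(&pdev->dev, "clock running at %lu Hz\n", clk_get_rate(clk));
	return 0;
}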