Diffstat (limited to 'arch/arm/mach-omap2/clock.c')
-rw-r--r-- | arch/arm/mach-omap2/clock.c | 910 |
1 file changed, 242 insertions, 668 deletions
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index e381d991092c..e4ec3a69ee2e 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -20,7 +20,7 @@ | |||
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
23 | #include <linux/clk.h> | 23 | #include <linux/clk-provider.h> |
24 | #include <linux/io.h> | 24 | #include <linux/io.h> |
25 | #include <linux/bitops.h> | 25 | #include <linux/bitops.h> |
26 | 26 | ||
@@ -55,9 +55,28 @@ u16 cpu_mask; | |||
55 | */ | 55 | */ |
56 | static bool clkdm_control = true; | 56 | static bool clkdm_control = true; |
57 | 57 | ||
58 | static LIST_HEAD(clocks); | 58 | static LIST_HEAD(clk_hw_omap_clocks); |
59 | static DEFINE_MUTEX(clocks_mutex); | 59 | |
60 | static DEFINE_SPINLOCK(clockfw_lock); | 60 | /* |
61 | * Used for clocks that have the same value as the parent clock, | ||
62 | * divided by some factor | ||
63 | */ | ||
64 | unsigned long omap_fixed_divisor_recalc(struct clk_hw *hw, | ||
65 | unsigned long parent_rate) | ||
66 | { | ||
67 | struct clk_hw_omap *oclk; | ||
68 | |||
69 | if (!hw) { | ||
70 | pr_warn("%s: hw is NULL\n", __func__); | ||
71 | return -EINVAL; | ||
72 | } | ||
73 | |||
74 | oclk = to_clk_hw_omap(hw); | ||
75 | |||
76 | WARN_ON(!oclk->fixed_div); | ||
77 | |||
78 | return parent_rate / oclk->fixed_div; | ||
79 | } | ||
61 | 80 | ||
62 | /* | 81 | /* |
63 | * OMAP2+ specific clock functions | 82 | * OMAP2+ specific clock functions |
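The new omap_fixed_divisor_recalc() added above just divides the parent rate by the clock's fixed_div field (warning if fixed_div is unset). A minimal standalone sketch of that arithmetic, using a simplified stand-in struct rather than the kernel's clk_hw_omap:

/* Illustrative only: mirrors the arithmetic of omap_fixed_divisor_recalc(). */
#include <stdio.h>

struct fixed_div_clk {
	unsigned int fixed_div;		/* stand-in for clk_hw_omap.fixed_div */
};

static unsigned long fixed_div_recalc(const struct fixed_div_clk *clk,
				      unsigned long parent_rate)
{
	return clk->fixed_div ? parent_rate / clk->fixed_div : 0;
}

int main(void)
{
	struct fixed_div_clk clk = { .fixed_div = 2 };

	/* a 96 MHz parent with fixed_div = 2 recalculates to 48000000 Hz */
	printf("%lu\n", fixed_div_recalc(&clk, 96000000UL));
	return 0;
}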
@@ -109,7 +128,7 @@ static int _wait_idlest_generic(void __iomem *reg, u32 mask, u8 idlest, | |||
109 | * belong in the clock code and will be moved in the medium term to | 128 | * belong in the clock code and will be moved in the medium term to |
110 | * module-dependent code. No return value. | 129 | * module-dependent code. No return value. |
111 | */ | 130 | */ |
112 | static void _omap2_module_wait_ready(struct clk *clk) | 131 | static void _omap2_module_wait_ready(struct clk_hw_omap *clk) |
113 | { | 132 | { |
114 | void __iomem *companion_reg, *idlest_reg; | 133 | void __iomem *companion_reg, *idlest_reg; |
115 | u8 other_bit, idlest_bit, idlest_val, idlest_reg_id; | 134 | u8 other_bit, idlest_bit, idlest_val, idlest_reg_id; |
@@ -124,12 +143,11 @@ static void _omap2_module_wait_ready(struct clk *clk) | |||
124 | } | 143 | } |
125 | 144 | ||
126 | clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val); | 145 | clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val); |
127 | |||
128 | r = cm_split_idlest_reg(idlest_reg, &prcm_mod, &idlest_reg_id); | 146 | r = cm_split_idlest_reg(idlest_reg, &prcm_mod, &idlest_reg_id); |
129 | if (r) { | 147 | if (r) { |
130 | /* IDLEST register not in the CM module */ | 148 | /* IDLEST register not in the CM module */ |
131 | _wait_idlest_generic(idlest_reg, (1 << idlest_bit), idlest_val, | 149 | _wait_idlest_generic(idlest_reg, (1 << idlest_bit), idlest_val, |
132 | clk->name); | 150 | __clk_get_name(clk->hw.clk)); |
133 | } else { | 151 | } else { |
134 | cm_wait_module_ready(prcm_mod, idlest_reg_id, idlest_bit); | 152 | cm_wait_module_ready(prcm_mod, idlest_reg_id, idlest_bit); |
135 | }; | 153 | }; |
@@ -145,15 +163,16 @@ static void _omap2_module_wait_ready(struct clk *clk) | |||
145 | * clockdomain pointer, and save it into the struct clk. Intended to be | 163 | * clockdomain pointer, and save it into the struct clk. Intended to be |
146 | * called during clk_register(). No return value. | 164 | * called during clk_register(). No return value. |
147 | */ | 165 | */ |
148 | void omap2_init_clk_clkdm(struct clk *clk) | 166 | void omap2_init_clk_clkdm(struct clk_hw *hw) |
149 | { | 167 | { |
168 | struct clk_hw_omap *clk = to_clk_hw_omap(hw); | ||
150 | struct clockdomain *clkdm; | 169 | struct clockdomain *clkdm; |
151 | const char *clk_name; | 170 | const char *clk_name; |
152 | 171 | ||
153 | if (!clk->clkdm_name) | 172 | if (!clk->clkdm_name) |
154 | return; | 173 | return; |
155 | 174 | ||
156 | clk_name = __clk_get_name(clk); | 175 | clk_name = __clk_get_name(hw->clk); |
157 | 176 | ||
158 | clkdm = clkdm_lookup(clk->clkdm_name); | 177 | clkdm = clkdm_lookup(clk->clkdm_name); |
159 | if (clkdm) { | 178 | if (clkdm) { |
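Several hunks in this patch recover the OMAP-specific clock wrapper from the generic struct clk_hw handle via to_clk_hw_omap(). A minimal sketch of that embed-and-recover pattern, assuming (as the conversions above suggest) that struct clk_hw_omap embeds its struct clk_hw as a member named hw; the names below are simplified stand-ins, not the kernel definitions:

/* Sketch: the OMAP wrapper embeds the generic handle, and offsetof()
 * arithmetic (the container_of() idiom) recovers the wrapper again. */
#include <stddef.h>

struct clk_hw_sketch { void *clk; };

struct clk_hw_omap_sketch {
	struct clk_hw_sketch hw;	/* embedded generic handle */
	const char *clkdm_name;		/* OMAP-specific data follows */
	unsigned int fixed_div;
};

#define to_clk_hw_omap_sketch(_hw) \
	((struct clk_hw_omap_sketch *)((char *)(_hw) - \
		offsetof(struct clk_hw_omap_sketch, hw)))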
@@ -200,8 +219,8 @@ void __init omap2_clk_disable_clkdm_control(void) | |||
200 | * associate this type of code with per-module data structures to | 219 | * associate this type of code with per-module data structures to |
201 | * avoid this issue, and remove the casts. No return value. | 220 | * avoid this issue, and remove the casts. No return value. |
202 | */ | 221 | */ |
203 | void omap2_clk_dflt_find_companion(struct clk *clk, void __iomem **other_reg, | 222 | void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk, |
204 | u8 *other_bit) | 223 | void __iomem **other_reg, u8 *other_bit) |
205 | { | 224 | { |
206 | u32 r; | 225 | u32 r; |
207 | 226 | ||
@@ -229,8 +248,8 @@ void omap2_clk_dflt_find_companion(struct clk *clk, void __iomem **other_reg, | |||
229 | * register address ID (e.g., that CM_FCLKEN2 corresponds to | 248 | * register address ID (e.g., that CM_FCLKEN2 corresponds to |
230 | * CM_IDLEST2). This is not true for all modules. No return value. | 249 | * CM_IDLEST2). This is not true for all modules. No return value. |
231 | */ | 250 | */ |
232 | void omap2_clk_dflt_find_idlest(struct clk *clk, void __iomem **idlest_reg, | 251 | void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk, |
233 | u8 *idlest_bit, u8 *idlest_val) | 252 | void __iomem **idlest_reg, u8 *idlest_bit, u8 *idlest_val) |
234 | { | 253 | { |
235 | u32 r; | 254 | u32 r; |
236 | 255 | ||
@@ -252,16 +271,44 @@ void omap2_clk_dflt_find_idlest(struct clk *clk, void __iomem **idlest_reg, | |||
252 | 271 | ||
253 | } | 272 | } |
254 | 273 | ||
255 | int omap2_dflt_clk_enable(struct clk *clk) | 274 | /** |
275 | * omap2_dflt_clk_enable - enable a clock in the hardware | ||
276 | * @hw: struct clk_hw * of the clock to enable | ||
277 | * | ||
278 | * Enable the clock @hw in the hardware. We first call into the OMAP | ||
279 | * clockdomain code to "enable" the corresponding clockdomain if this | ||
280 | * is the first enabled user of the clockdomain. Then program the | ||
281 | * hardware to enable the clock. Then wait for the IP block that uses | ||
282 | * this clock to leave idle (if applicable). Returns the error value | ||
283 | * from clkdm_clk_enable() if it terminated with an error, or -EINVAL | ||
284 | * if @hw has a null clock enable_reg, or zero upon success. | ||
285 | */ | ||
286 | int omap2_dflt_clk_enable(struct clk_hw *hw) | ||
256 | { | 287 | { |
288 | struct clk_hw_omap *clk; | ||
257 | u32 v; | 289 | u32 v; |
290 | int ret = 0; | ||
291 | |||
292 | clk = to_clk_hw_omap(hw); | ||
293 | |||
294 | if (clkdm_control && clk->clkdm) { | ||
295 | ret = clkdm_clk_enable(clk->clkdm, hw->clk); | ||
296 | if (ret) { | ||
297 | WARN(1, "%s: could not enable %s's clockdomain %s: %d\n", | ||
298 | __func__, __clk_get_name(hw->clk), | ||
299 | clk->clkdm->name, ret); | ||
300 | return ret; | ||
301 | } | ||
302 | } | ||
258 | 303 | ||
259 | if (unlikely(clk->enable_reg == NULL)) { | 304 | if (unlikely(clk->enable_reg == NULL)) { |
260 | pr_err("clock.c: Enable for %s without enable code\n", | 305 | pr_err("%s: %s missing enable_reg\n", __func__, |
261 | clk->name); | 306 | __clk_get_name(hw->clk)); |
262 | return 0; /* REVISIT: -EINVAL */ | 307 | ret = -EINVAL; |
308 | goto err; | ||
263 | } | 309 | } |
264 | 310 | ||
311 | /* FIXME should not have INVERT_ENABLE bit here */ | ||
265 | v = __raw_readl(clk->enable_reg); | 312 | v = __raw_readl(clk->enable_reg); |
266 | if (clk->flags & INVERT_ENABLE) | 313 | if (clk->flags & INVERT_ENABLE) |
267 | v &= ~(1 << clk->enable_bit); | 314 | v &= ~(1 << clk->enable_bit); |
@@ -270,22 +317,39 @@ int omap2_dflt_clk_enable(struct clk *clk) | |||
270 | __raw_writel(v, clk->enable_reg); | 317 | __raw_writel(v, clk->enable_reg); |
271 | v = __raw_readl(clk->enable_reg); /* OCP barrier */ | 318 | v = __raw_readl(clk->enable_reg); /* OCP barrier */ |
272 | 319 | ||
273 | if (clk->ops->find_idlest) | 320 | if (clk->ops && clk->ops->find_idlest) |
274 | _omap2_module_wait_ready(clk); | 321 | _omap2_module_wait_ready(clk); |
275 | 322 | ||
276 | return 0; | 323 | return 0; |
324 | |||
325 | err: | ||
326 | if (clkdm_control && clk->clkdm) | ||
327 | clkdm_clk_disable(clk->clkdm, hw->clk); | ||
328 | return ret; | ||
277 | } | 329 | } |
278 | 330 | ||
279 | void omap2_dflt_clk_disable(struct clk *clk) | 331 | /** |
332 | * omap2_dflt_clk_disable - disable a clock in the hardware | ||
333 | * @hw: struct clk_hw * of the clock to disable | ||
334 | * | ||
335 | * Disable the clock @hw in the hardware, and call into the OMAP | ||
336 | * clockdomain code to "disable" the corresponding clockdomain if all | ||
337 | * clocks/hwmods in that clockdomain are now disabled. No return | ||
338 | * value. | ||
339 | */ | ||
340 | void omap2_dflt_clk_disable(struct clk_hw *hw) | ||
280 | { | 341 | { |
342 | struct clk_hw_omap *clk; | ||
281 | u32 v; | 343 | u32 v; |
282 | 344 | ||
345 | clk = to_clk_hw_omap(hw); | ||
283 | if (!clk->enable_reg) { | 346 | if (!clk->enable_reg) { |
284 | /* | 347 | /* |
285 | * 'Independent' here refers to a clock which is not | 348 | * 'independent' here refers to a clock which is not |
286 | * controlled by its parent. | 349 | * controlled by its parent. |
287 | */ | 350 | */ |
288 | pr_err("clock: clk_disable called on independent clock %s which has no enable_reg\n", clk->name); | 351 | pr_err("%s: independent clock %s has no enable_reg\n", |
352 | __func__, __clk_get_name(hw->clk)); | ||
289 | return; | 353 | return; |
290 | } | 354 | } |
291 | 355 | ||
@@ -296,191 +360,213 @@ void omap2_dflt_clk_disable(struct clk *clk) | |||
296 | v &= ~(1 << clk->enable_bit); | 360 | v &= ~(1 << clk->enable_bit); |
297 | __raw_writel(v, clk->enable_reg); | 361 | __raw_writel(v, clk->enable_reg); |
298 | /* No OCP barrier needed here since it is a disable operation */ | 362 | /* No OCP barrier needed here since it is a disable operation */ |
299 | } | ||
300 | |||
301 | const struct clkops clkops_omap2_dflt_wait = { | ||
302 | .enable = omap2_dflt_clk_enable, | ||
303 | .disable = omap2_dflt_clk_disable, | ||
304 | .find_companion = omap2_clk_dflt_find_companion, | ||
305 | .find_idlest = omap2_clk_dflt_find_idlest, | ||
306 | }; | ||
307 | 363 | ||
308 | const struct clkops clkops_omap2_dflt = { | 364 | if (clkdm_control && clk->clkdm) |
309 | .enable = omap2_dflt_clk_enable, | 365 | clkdm_clk_disable(clk->clkdm, hw->clk); |
310 | .disable = omap2_dflt_clk_disable, | 366 | } |
311 | }; | ||
312 | 367 | ||
313 | /** | 368 | /** |
314 | * omap2_clk_disable - disable a clock, if the system is not using it | 369 | * omap2_clkops_enable_clkdm - increment usecount on clkdm of @hw |
315 | * @clk: struct clk * to disable | 370 | * @hw: struct clk_hw * of the clock being enabled |
316 | * | 371 | * |
317 | * Decrements the usecount on struct clk @clk. If there are no users | 372 | * Increment the usecount of the clockdomain of the clock pointed to |
318 | * left, call the clkops-specific clock disable function to disable it | 373 | * by @hw; if the usecount is 1, the clockdomain will be "enabled." |
319 | * in hardware. If the clock is part of a clockdomain (which they all | 374 | * Only needed for clocks that don't use omap2_dflt_clk_enable() as |
320 | * should be), request that the clockdomain be disabled. (It too has | 375 | * their enable function pointer. Passes along the return value of |
321 | * a usecount, and so will not be disabled in the hardware until it no | 376 | * clkdm_clk_enable(), -EINVAL if @hw is not associated with a |
322 | * longer has any users.) If the clock has a parent clock (most of | 377 | * clockdomain, or 0 if clock framework-based clockdomain control is |
323 | * them do), then call ourselves, recursing on the parent clock. This | 378 | * not implemented. |
324 | * can cause an entire branch of the clock tree to be powered off by | ||
325 | * simply disabling one clock. Intended to be called with the clockfw_lock | ||
326 | * spinlock held. No return value. | ||
327 | */ | 379 | */ |
328 | void omap2_clk_disable(struct clk *clk) | 380 | int omap2_clkops_enable_clkdm(struct clk_hw *hw) |
329 | { | 381 | { |
330 | if (clk->usecount == 0) { | 382 | struct clk_hw_omap *clk; |
331 | WARN(1, "clock: %s: omap2_clk_disable() called, but usecount already 0?", clk->name); | 383 | int ret = 0; |
332 | return; | ||
333 | } | ||
334 | |||
335 | pr_debug("clock: %s: decrementing usecount\n", clk->name); | ||
336 | 384 | ||
337 | clk->usecount--; | 385 | clk = to_clk_hw_omap(hw); |
338 | 386 | ||
339 | if (clk->usecount > 0) | 387 | if (unlikely(!clk->clkdm)) { |
340 | return; | 388 | pr_err("%s: %s: no clkdm set ?!\n", __func__, |
389 | __clk_get_name(hw->clk)); | ||
390 | return -EINVAL; | ||
391 | } | ||
341 | 392 | ||
342 | pr_debug("clock: %s: disabling in hardware\n", clk->name); | 393 | if (unlikely(clk->enable_reg)) |
394 | pr_err("%s: %s: should use dflt_clk_enable ?!\n", __func__, | ||
395 | __clk_get_name(hw->clk)); | ||
343 | 396 | ||
344 | if (clk->ops && clk->ops->disable) { | 397 | if (!clkdm_control) { |
345 | trace_clock_disable(clk->name, 0, smp_processor_id()); | 398 | pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n", |
346 | clk->ops->disable(clk); | 399 | __func__, __clk_get_name(hw->clk)); |
400 | return 0; | ||
347 | } | 401 | } |
348 | 402 | ||
349 | if (clkdm_control && clk->clkdm) | 403 | ret = clkdm_clk_enable(clk->clkdm, hw->clk); |
350 | clkdm_clk_disable(clk->clkdm, clk); | 404 | WARN(ret, "%s: could not enable %s's clockdomain %s: %d\n", |
405 | __func__, __clk_get_name(hw->clk), clk->clkdm->name, ret); | ||
351 | 406 | ||
352 | if (clk->parent) | 407 | return ret; |
353 | omap2_clk_disable(clk->parent); | ||
354 | } | 408 | } |
355 | 409 | ||
356 | /** | 410 | /** |
357 | * omap2_clk_enable - request that the system enable a clock | 411 | * omap2_clkops_disable_clkdm - decrement usecount on clkdm of @hw |
358 | * @clk: struct clk * to enable | 412 | * @hw: struct clk_hw * of the clock being disabled |
359 | * | 413 | * |
360 | * Increments the usecount on struct clk @clk. If there were no users | 414 | * Decrement the usecount of the clockdomain of the clock pointed to |
361 | * previously, then recurse up the clock tree, enabling all of the | 415 | * by @hw; if the usecount is 0, the clockdomain will be "disabled." |
362 | * clock's parents and all of the parent clockdomains, and finally, | 416 | * Only needed for clocks that don't use omap2_dflt_clk_disable() as their |
363 | * enabling @clk's clockdomain, and @clk itself. Intended to be | 417 | * disable function pointer. No return value. |
364 | * called with the clockfw_lock spinlock held. Returns 0 upon success | ||
365 | * or a negative error code upon failure. | ||
366 | */ | 418 | */ |
367 | int omap2_clk_enable(struct clk *clk) | 419 | void omap2_clkops_disable_clkdm(struct clk_hw *hw) |
368 | { | 420 | { |
369 | int ret; | 421 | struct clk_hw_omap *clk; |
370 | 422 | ||
371 | pr_debug("clock: %s: incrementing usecount\n", clk->name); | 423 | clk = to_clk_hw_omap(hw); |
372 | 424 | ||
373 | clk->usecount++; | 425 | if (unlikely(!clk->clkdm)) { |
374 | 426 | pr_err("%s: %s: no clkdm set ?!\n", __func__, | |
375 | if (clk->usecount > 1) | 427 | __clk_get_name(hw->clk)); |
376 | return 0; | 428 | return; |
377 | |||
378 | pr_debug("clock: %s: enabling in hardware\n", clk->name); | ||
379 | |||
380 | if (clk->parent) { | ||
381 | ret = omap2_clk_enable(clk->parent); | ||
382 | if (ret) { | ||
383 | WARN(1, "clock: %s: could not enable parent %s: %d\n", | ||
384 | clk->name, clk->parent->name, ret); | ||
385 | goto oce_err1; | ||
386 | } | ||
387 | } | 429 | } |
388 | 430 | ||
389 | if (clkdm_control && clk->clkdm) { | 431 | if (unlikely(clk->enable_reg)) |
390 | ret = clkdm_clk_enable(clk->clkdm, clk); | 432 | pr_err("%s: %s: should use dflt_clk_disable ?!\n", __func__, |
391 | if (ret) { | 433 | __clk_get_name(hw->clk)); |
392 | WARN(1, "clock: %s: could not enable clockdomain %s: %d\n", | ||
393 | clk->name, clk->clkdm->name, ret); | ||
394 | goto oce_err2; | ||
395 | } | ||
396 | } | ||
397 | 434 | ||
398 | if (clk->ops && clk->ops->enable) { | 435 | if (!clkdm_control) { |
399 | trace_clock_enable(clk->name, 1, smp_processor_id()); | 436 | pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n", |
400 | ret = clk->ops->enable(clk); | 437 | __func__, __clk_get_name(hw->clk)); |
401 | if (ret) { | 438 | return; |
402 | WARN(1, "clock: %s: could not enable: %d\n", | ||
403 | clk->name, ret); | ||
404 | goto oce_err3; | ||
405 | } | ||
406 | } | 439 | } |
407 | 440 | ||
408 | return 0; | 441 | clkdm_clk_disable(clk->clkdm, hw->clk); |
409 | |||
410 | oce_err3: | ||
411 | if (clkdm_control && clk->clkdm) | ||
412 | clkdm_clk_disable(clk->clkdm, clk); | ||
413 | oce_err2: | ||
414 | if (clk->parent) | ||
415 | omap2_clk_disable(clk->parent); | ||
416 | oce_err1: | ||
417 | clk->usecount--; | ||
418 | |||
419 | return ret; | ||
420 | } | 442 | } |
421 | 443 | ||
422 | /* Given a clock and a rate apply a clock specific rounding function */ | 444 | /** |
423 | long omap2_clk_round_rate(struct clk *clk, unsigned long rate) | 445 | * omap2_dflt_clk_is_enabled - is clock enabled in the hardware? |
446 | * @hw: struct clk_hw * to check | ||
447 | * | ||
448 | * Return 1 if the clock represented by @hw is enabled in the | ||
449 | * hardware, or 0 otherwise. Intended for use in the struct | ||
450 | * clk_ops.is_enabled function pointer. | ||
451 | */ | ||
452 | int omap2_dflt_clk_is_enabled(struct clk_hw *hw) | ||
424 | { | 453 | { |
425 | if (clk->round_rate) | 454 | struct clk_hw_omap *clk = to_clk_hw_omap(hw); |
426 | return clk->round_rate(clk, rate); | 455 | u32 v; |
427 | 456 | ||
428 | return clk->rate; | 457 | v = __raw_readl(clk->enable_reg); |
458 | |||
459 | if (clk->flags & INVERT_ENABLE) | ||
460 | v ^= BIT(clk->enable_bit); | ||
461 | |||
462 | v &= BIT(clk->enable_bit); | ||
463 | |||
464 | return v ? 1 : 0; | ||
429 | } | 465 | } |
430 | 466 | ||
431 | /* Set the clock rate for a clock source */ | 467 | static int __initdata mpurate; |
432 | int omap2_clk_set_rate(struct clk *clk, unsigned long rate) | 468 | |
469 | /* | ||
470 | * By default we use the rate set by the bootloader. | ||
471 | * You can override this with mpurate= cmdline option. | ||
472 | */ | ||
473 | static int __init omap_clk_setup(char *str) | ||
433 | { | 474 | { |
434 | int ret = -EINVAL; | 475 | get_option(&str, &mpurate); |
435 | 476 | ||
436 | pr_debug("clock: set_rate for clock %s to rate %ld\n", clk->name, rate); | 477 | if (!mpurate) |
478 | return 1; | ||
437 | 479 | ||
438 | /* dpll_ck, core_ck, virt_prcm_set; plus all clksel clocks */ | 480 | if (mpurate < 1000) |
439 | if (clk->set_rate) { | 481 | mpurate *= 1000000; |
440 | trace_clock_set_rate(clk->name, rate, smp_processor_id()); | ||
441 | ret = clk->set_rate(clk, rate); | ||
442 | } | ||
443 | 482 | ||
444 | return ret; | 483 | return 1; |
445 | } | 484 | } |
485 | __setup("mpurate=", omap_clk_setup); | ||
446 | 486 | ||
447 | int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent) | 487 | /** |
488 | * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock | ||
489 | * @clk: struct clk * to initialize | ||
490 | * | ||
491 | * Add an OMAP clock @clk to the internal list of OMAP clocks. Used | ||
492 | * temporarily for autoidle handling, until this support can be | ||
493 | * integrated into the common clock framework code in some way. No | ||
494 | * return value. | ||
495 | */ | ||
496 | void omap2_init_clk_hw_omap_clocks(struct clk *clk) | ||
448 | { | 497 | { |
449 | if (!clk->clksel) | 498 | struct clk_hw_omap *c; |
450 | return -EINVAL; | ||
451 | 499 | ||
452 | if (clk->parent == new_parent) | 500 | if (__clk_get_flags(clk) & CLK_IS_BASIC) |
453 | return 0; | 501 | return; |
454 | 502 | ||
455 | return omap2_clksel_set_parent(clk, new_parent); | 503 | c = to_clk_hw_omap(__clk_get_hw(clk)); |
504 | list_add(&c->node, &clk_hw_omap_clocks); | ||
456 | } | 505 | } |
457 | 506 | ||
458 | /* | 507 | /** |
459 | * OMAP2+ clock reset and init functions | 508 | * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that |
509 | * support it | ||
510 | * | ||
511 | * Enable clock autoidle on all OMAP clocks that have allow_idle | ||
512 | * function pointers associated with them. This function is intended | ||
513 | * to be temporary until support for this is added to the common clock | ||
514 | * code. Returns 0. | ||
460 | */ | 515 | */ |
516 | int omap2_clk_enable_autoidle_all(void) | ||
517 | { | ||
518 | struct clk_hw_omap *c; | ||
461 | 519 | ||
462 | #ifdef CONFIG_OMAP_RESET_CLOCKS | 520 | list_for_each_entry(c, &clk_hw_omap_clocks, node) |
463 | void omap2_clk_disable_unused(struct clk *clk) | 521 | if (c->ops && c->ops->allow_idle) |
522 | c->ops->allow_idle(c); | ||
523 | return 0; | ||
524 | } | ||
525 | |||
526 | /** | ||
527 | * omap2_clk_disable_autoidle_all - disable autoidle on all OMAP clocks that | ||
528 | * support it | ||
529 | * | ||
530 | * Disable clock autoidle on all OMAP clocks that have allow_idle | ||
531 | * function pointers associated with them. This function is intended | ||
532 | * to be temporary until support for this is added to the common clock | ||
533 | * code. Returns 0. | ||
534 | */ | ||
535 | int omap2_clk_disable_autoidle_all(void) | ||
464 | { | 536 | { |
465 | u32 regval32, v; | 537 | struct clk_hw_omap *c; |
466 | 538 | ||
467 | v = (clk->flags & INVERT_ENABLE) ? (1 << clk->enable_bit) : 0; | 539 | list_for_each_entry(c, &clk_hw_omap_clocks, node) |
540 | if (c->ops && c->ops->deny_idle) | ||
541 | c->ops->deny_idle(c); | ||
542 | return 0; | ||
543 | } | ||
468 | 544 | ||
469 | regval32 = __raw_readl(clk->enable_reg); | 545 | /** |
470 | if ((regval32 & (1 << clk->enable_bit)) == v) | 546 | * omap2_clk_enable_init_clocks - prepare & enable a list of clocks |
471 | return; | 547 | * @clk_names: ptr to an array of strings of clock names to enable |
548 | * @num_clocks: number of clock names in @clk_names | ||
549 | * | ||
550 | * Prepare and enable a list of clocks, named by @clk_names. No | ||
551 | * return value. XXX Deprecated; only needed until these clocks are | ||
552 | * properly claimed and enabled by the drivers or core code that uses | ||
553 | * them. XXX What code disables & calls clk_put on these clocks? | ||
554 | */ | ||
555 | void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks) | ||
556 | { | ||
557 | struct clk *init_clk; | ||
558 | int i; | ||
472 | 559 | ||
473 | pr_debug("Disabling unused clock \"%s\"\n", clk->name); | 560 | for (i = 0; i < num_clocks; i++) { |
474 | if (cpu_is_omap34xx()) { | 561 | init_clk = clk_get(NULL, clk_names[i]); |
475 | omap2_clk_enable(clk); | 562 | clk_prepare_enable(init_clk); |
476 | omap2_clk_disable(clk); | ||
477 | } else { | ||
478 | clk->ops->disable(clk); | ||
479 | } | 563 | } |
480 | if (clk->clkdm != NULL) | ||
481 | pwrdm_state_switch(clk->clkdm->pwrdm.ptr); | ||
482 | } | 564 | } |
483 | #endif | 565 | |
566 | const struct clk_hw_omap_ops clkhwops_wait = { | ||
567 | .find_idlest = omap2_clk_dflt_find_idlest, | ||
568 | .find_companion = omap2_clk_dflt_find_companion, | ||
569 | }; | ||
484 | 570 | ||
485 | /** | 571 | /** |
486 | * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument | 572 | * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument |
@@ -512,14 +598,12 @@ int __init omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name) | |||
512 | r = clk_set_rate(mpurate_ck, mpurate); | 598 | r = clk_set_rate(mpurate_ck, mpurate); |
513 | if (IS_ERR_VALUE(r)) { | 599 | if (IS_ERR_VALUE(r)) { |
514 | WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n", | 600 | WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n", |
515 | mpurate_ck->name, mpurate, r); | 601 | mpurate_ck_name, mpurate, r); |
516 | clk_put(mpurate_ck); | 602 | clk_put(mpurate_ck); |
517 | return -EINVAL; | 603 | return -EINVAL; |
518 | } | 604 | } |
519 | 605 | ||
520 | calibrate_delay(); | 606 | calibrate_delay(); |
521 | recalculate_root_clocks(); | ||
522 | |||
523 | clk_put(mpurate_ck); | 607 | clk_put(mpurate_ck); |
524 | 608 | ||
525 | return 0; | 609 | return 0; |
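A usage note on the mpurate handling moved into this file above: omap_clk_setup() treats mpurate= values below 1000 as MHz and scales them by 1000000, so for example mpurate=600 on the kernel command line requests the same 600 MHz rate as mpurate=600000000 (values here are illustrative); omap2_clk_switch_mpurate_at_boot() then applies the parsed rate to the named MPU clock with clk_set_rate().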
@@ -563,513 +647,3 @@ void __init omap2_clk_print_new_rates(const char *hfclkin_ck_name, | |||
563 | (clk_get_rate(core_ck) / 1000000), | 647 | (clk_get_rate(core_ck) / 1000000), |
564 | (clk_get_rate(mpu_ck) / 1000000)); | 648 | (clk_get_rate(mpu_ck) / 1000000)); |
565 | } | 649 | } |
566 | |||
567 | /* Common data */ | ||
568 | |||
569 | int clk_enable(struct clk *clk) | ||
570 | { | ||
571 | unsigned long flags; | ||
572 | int ret; | ||
573 | |||
574 | if (clk == NULL || IS_ERR(clk)) | ||
575 | return -EINVAL; | ||
576 | |||
577 | spin_lock_irqsave(&clockfw_lock, flags); | ||
578 | ret = omap2_clk_enable(clk); | ||
579 | spin_unlock_irqrestore(&clockfw_lock, flags); | ||
580 | |||
581 | return ret; | ||
582 | } | ||
583 | EXPORT_SYMBOL(clk_enable); | ||
584 | |||
585 | void clk_disable(struct clk *clk) | ||
586 | { | ||
587 | unsigned long flags; | ||
588 | |||
589 | if (clk == NULL || IS_ERR(clk)) | ||
590 | return; | ||
591 | |||
592 | spin_lock_irqsave(&clockfw_lock, flags); | ||
593 | if (clk->usecount == 0) { | ||
594 | pr_err("Trying disable clock %s with 0 usecount\n", | ||
595 | clk->name); | ||
596 | WARN_ON(1); | ||
597 | goto out; | ||
598 | } | ||
599 | |||
600 | omap2_clk_disable(clk); | ||
601 | |||
602 | out: | ||
603 | spin_unlock_irqrestore(&clockfw_lock, flags); | ||
604 | } | ||
605 | EXPORT_SYMBOL(clk_disable); | ||
606 | |||
607 | unsigned long clk_get_rate(struct clk *clk) | ||
608 | { | ||
609 | unsigned long flags; | ||
610 | unsigned long ret; | ||
611 | |||
612 | if (clk == NULL || IS_ERR(clk)) | ||
613 | return 0; | ||
614 | |||
615 | spin_lock_irqsave(&clockfw_lock, flags); | ||
616 | ret = clk->rate; | ||
617 | spin_unlock_irqrestore(&clockfw_lock, flags); | ||
618 | |||
619 | return ret; | ||
620 | } | ||
621 | EXPORT_SYMBOL(clk_get_rate); | ||
622 | |||
623 | /* | ||
624 | * Optional clock functions defined in include/linux/clk.h | ||
625 | */ | ||
626 | |||
627 | long clk_round_rate(struct clk *clk, unsigned long rate) | ||
628 | { | ||
629 | unsigned long flags; | ||
630 | long ret; | ||
631 | |||
632 | if (clk == NULL || IS_ERR(clk)) | ||
633 | return 0; | ||
634 | |||
635 | spin_lock_irqsave(&clockfw_lock, flags); | ||
636 | ret = omap2_clk_round_rate(clk, rate); | ||
637 | spin_unlock_irqrestore(&clockfw_lock, flags); | ||
638 | |||
639 | return ret; | ||
640 | } | ||
641 | EXPORT_SYMBOL(clk_round_rate); | ||
642 | |||
643 | int clk_set_rate(struct clk *clk, unsigned long rate) | ||
644 | { | ||
645 | unsigned long flags; | ||
646 | int ret = -EINVAL; | ||
647 | |||
648 | if (clk == NULL || IS_ERR(clk)) | ||
649 | return ret; | ||
650 | |||
651 | spin_lock_irqsave(&clockfw_lock, flags); | ||
652 | ret = omap2_clk_set_rate(clk, rate); | ||
653 | if (ret == 0) | ||
654 | propagate_rate(clk); | ||
655 | spin_unlock_irqrestore(&clockfw_lock, flags); | ||
656 | |||
657 | return ret; | ||
658 | } | ||
659 | EXPORT_SYMBOL(clk_set_rate); | ||
660 | |||
661 | int clk_set_parent(struct clk *clk, struct clk *parent) | ||
662 | { | ||
663 | unsigned long flags; | ||
664 | int ret = -EINVAL; | ||
665 | |||
666 | if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent)) | ||
667 | return ret; | ||
668 | |||
669 | spin_lock_irqsave(&clockfw_lock, flags); | ||
670 | if (clk->usecount == 0) { | ||
671 | ret = omap2_clk_set_parent(clk, parent); | ||
672 | if (ret == 0) | ||
673 | propagate_rate(clk); | ||
674 | } else { | ||
675 | ret = -EBUSY; | ||
676 | } | ||
677 | spin_unlock_irqrestore(&clockfw_lock, flags); | ||
678 | |||
679 | return ret; | ||
680 | } | ||
681 | EXPORT_SYMBOL(clk_set_parent); | ||
682 | |||
683 | struct clk *clk_get_parent(struct clk *clk) | ||
684 | { | ||
685 | return clk->parent; | ||
686 | } | ||
687 | EXPORT_SYMBOL(clk_get_parent); | ||
688 | |||
689 | /* | ||
690 | * OMAP specific clock functions shared between omap1 and omap2 | ||
691 | */ | ||
692 | |||
693 | int __initdata mpurate; | ||
694 | |||
695 | /* | ||
696 | * By default we use the rate set by the bootloader. | ||
697 | * You can override this with mpurate= cmdline option. | ||
698 | */ | ||
699 | static int __init omap_clk_setup(char *str) | ||
700 | { | ||
701 | get_option(&str, &mpurate); | ||
702 | |||
703 | if (!mpurate) | ||
704 | return 1; | ||
705 | |||
706 | if (mpurate < 1000) | ||
707 | mpurate *= 1000000; | ||
708 | |||
709 | return 1; | ||
710 | } | ||
711 | __setup("mpurate=", omap_clk_setup); | ||
712 | |||
713 | /* Used for clocks that always have same value as the parent clock */ | ||
714 | unsigned long followparent_recalc(struct clk *clk) | ||
715 | { | ||
716 | return clk->parent->rate; | ||
717 | } | ||
718 | |||
719 | /* | ||
720 | * Used for clocks that have the same value as the parent clock, | ||
721 | * divided by some factor | ||
722 | */ | ||
723 | unsigned long omap_fixed_divisor_recalc(struct clk *clk) | ||
724 | { | ||
725 | WARN_ON(!clk->fixed_div); | ||
726 | |||
727 | return clk->parent->rate / clk->fixed_div; | ||
728 | } | ||
729 | |||
730 | void clk_reparent(struct clk *child, struct clk *parent) | ||
731 | { | ||
732 | list_del_init(&child->sibling); | ||
733 | if (parent) | ||
734 | list_add(&child->sibling, &parent->children); | ||
735 | child->parent = parent; | ||
736 | |||
737 | /* now do the debugfs renaming to reattach the child | ||
738 | to the proper parent */ | ||
739 | } | ||
740 | |||
741 | /* Propagate rate to children */ | ||
742 | void propagate_rate(struct clk *tclk) | ||
743 | { | ||
744 | struct clk *clkp; | ||
745 | |||
746 | list_for_each_entry(clkp, &tclk->children, sibling) { | ||
747 | if (clkp->recalc) | ||
748 | clkp->rate = clkp->recalc(clkp); | ||
749 | propagate_rate(clkp); | ||
750 | } | ||
751 | } | ||
752 | |||
753 | static LIST_HEAD(root_clks); | ||
754 | |||
755 | /** | ||
756 | * recalculate_root_clocks - recalculate and propagate all root clocks | ||
757 | * | ||
758 | * Recalculates all root clocks (clocks with no parent), which if the | ||
759 | * clock's .recalc is set correctly, should also propagate their rates. | ||
760 | * Called at init. | ||
761 | */ | ||
762 | void recalculate_root_clocks(void) | ||
763 | { | ||
764 | struct clk *clkp; | ||
765 | |||
766 | list_for_each_entry(clkp, &root_clks, sibling) { | ||
767 | if (clkp->recalc) | ||
768 | clkp->rate = clkp->recalc(clkp); | ||
769 | propagate_rate(clkp); | ||
770 | } | ||
771 | } | ||
772 | |||
773 | /** | ||
774 | * clk_preinit - initialize any fields in the struct clk before clk init | ||
775 | * @clk: struct clk * to initialize | ||
776 | * | ||
777 | * Initialize any struct clk fields needed before normal clk initialization | ||
778 | * can run. No return value. | ||
779 | */ | ||
780 | void clk_preinit(struct clk *clk) | ||
781 | { | ||
782 | INIT_LIST_HEAD(&clk->children); | ||
783 | } | ||
784 | |||
785 | int clk_register(struct clk *clk) | ||
786 | { | ||
787 | if (clk == NULL || IS_ERR(clk)) | ||
788 | return -EINVAL; | ||
789 | |||
790 | /* | ||
791 | * trap out already registered clocks | ||
792 | */ | ||
793 | if (clk->node.next || clk->node.prev) | ||
794 | return 0; | ||
795 | |||
796 | mutex_lock(&clocks_mutex); | ||
797 | if (clk->parent) | ||
798 | list_add(&clk->sibling, &clk->parent->children); | ||
799 | else | ||
800 | list_add(&clk->sibling, &root_clks); | ||
801 | |||
802 | list_add(&clk->node, &clocks); | ||
803 | if (clk->init) | ||
804 | clk->init(clk); | ||
805 | mutex_unlock(&clocks_mutex); | ||
806 | |||
807 | return 0; | ||
808 | } | ||
809 | EXPORT_SYMBOL(clk_register); | ||
810 | |||
811 | void clk_unregister(struct clk *clk) | ||
812 | { | ||
813 | if (clk == NULL || IS_ERR(clk)) | ||
814 | return; | ||
815 | |||
816 | mutex_lock(&clocks_mutex); | ||
817 | list_del(&clk->sibling); | ||
818 | list_del(&clk->node); | ||
819 | mutex_unlock(&clocks_mutex); | ||
820 | } | ||
821 | EXPORT_SYMBOL(clk_unregister); | ||
822 | |||
823 | void clk_enable_init_clocks(void) | ||
824 | { | ||
825 | struct clk *clkp; | ||
826 | |||
827 | list_for_each_entry(clkp, &clocks, node) | ||
828 | if (clkp->flags & ENABLE_ON_INIT) | ||
829 | clk_enable(clkp); | ||
830 | } | ||
831 | |||
832 | /** | ||
833 | * omap_clk_get_by_name - locate OMAP struct clk by its name | ||
834 | * @name: name of the struct clk to locate | ||
835 | * | ||
836 | * Locate an OMAP struct clk by its name. Assumes that struct clk | ||
837 | * names are unique. Returns NULL if not found or a pointer to the | ||
838 | * struct clk if found. | ||
839 | */ | ||
840 | struct clk *omap_clk_get_by_name(const char *name) | ||
841 | { | ||
842 | struct clk *c; | ||
843 | struct clk *ret = NULL; | ||
844 | |||
845 | mutex_lock(&clocks_mutex); | ||
846 | |||
847 | list_for_each_entry(c, &clocks, node) { | ||
848 | if (!strcmp(c->name, name)) { | ||
849 | ret = c; | ||
850 | break; | ||
851 | } | ||
852 | } | ||
853 | |||
854 | mutex_unlock(&clocks_mutex); | ||
855 | |||
856 | return ret; | ||
857 | } | ||
858 | |||
859 | int omap_clk_enable_autoidle_all(void) | ||
860 | { | ||
861 | struct clk *c; | ||
862 | unsigned long flags; | ||
863 | |||
864 | spin_lock_irqsave(&clockfw_lock, flags); | ||
865 | |||
866 | list_for_each_entry(c, &clocks, node) | ||
867 | if (c->ops->allow_idle) | ||
868 | c->ops->allow_idle(c); | ||
869 | |||
870 | spin_unlock_irqrestore(&clockfw_lock, flags); | ||
871 | |||
872 | return 0; | ||
873 | } | ||
874 | |||
875 | int omap_clk_disable_autoidle_all(void) | ||
876 | { | ||
877 | struct clk *c; | ||
878 | unsigned long flags; | ||
879 | |||
880 | spin_lock_irqsave(&clockfw_lock, flags); | ||
881 | |||
882 | list_for_each_entry(c, &clocks, node) | ||
883 | if (c->ops->deny_idle) | ||
884 | c->ops->deny_idle(c); | ||
885 | |||
886 | spin_unlock_irqrestore(&clockfw_lock, flags); | ||
887 | |||
888 | return 0; | ||
889 | } | ||
890 | |||
891 | /* | ||
892 | * Low level helpers | ||
893 | */ | ||
894 | static int clkll_enable_null(struct clk *clk) | ||
895 | { | ||
896 | return 0; | ||
897 | } | ||
898 | |||
899 | static void clkll_disable_null(struct clk *clk) | ||
900 | { | ||
901 | } | ||
902 | |||
903 | const struct clkops clkops_null = { | ||
904 | .enable = clkll_enable_null, | ||
905 | .disable = clkll_disable_null, | ||
906 | }; | ||
907 | |||
908 | /* | ||
909 | * Dummy clock | ||
910 | * | ||
911 | * Used for clock aliases that are needed on some OMAPs, but not others | ||
912 | */ | ||
913 | struct clk dummy_ck = { | ||
914 | .name = "dummy", | ||
915 | .ops = &clkops_null, | ||
916 | }; | ||
917 | |||
918 | /* | ||
919 | * | ||
920 | */ | ||
921 | |||
922 | #ifdef CONFIG_OMAP_RESET_CLOCKS | ||
923 | /* | ||
924 | * Disable any unused clocks left on by the bootloader | ||
925 | */ | ||
926 | static int __init clk_disable_unused(void) | ||
927 | { | ||
928 | struct clk *ck; | ||
929 | unsigned long flags; | ||
930 | |||
931 | pr_info("clock: disabling unused clocks to save power\n"); | ||
932 | |||
933 | spin_lock_irqsave(&clockfw_lock, flags); | ||
934 | list_for_each_entry(ck, &clocks, node) { | ||
935 | if (ck->ops == &clkops_null) | ||
936 | continue; | ||
937 | |||
938 | if (ck->usecount > 0 || !ck->enable_reg) | ||
939 | continue; | ||
940 | |||
941 | omap2_clk_disable_unused(ck); | ||
942 | } | ||
943 | spin_unlock_irqrestore(&clockfw_lock, flags); | ||
944 | |||
945 | return 0; | ||
946 | } | ||
947 | late_initcall(clk_disable_unused); | ||
948 | late_initcall(omap_clk_enable_autoidle_all); | ||
949 | #endif | ||
950 | |||
951 | #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) | ||
952 | /* | ||
953 | * debugfs support to trace clock tree hierarchy and attributes | ||
954 | */ | ||
955 | |||
956 | #include <linux/debugfs.h> | ||
957 | #include <linux/seq_file.h> | ||
958 | |||
959 | static struct dentry *clk_debugfs_root; | ||
960 | |||
961 | static int clk_dbg_show_summary(struct seq_file *s, void *unused) | ||
962 | { | ||
963 | struct clk *c; | ||
964 | struct clk *pa; | ||
965 | |||
966 | mutex_lock(&clocks_mutex); | ||
967 | seq_printf(s, "%-30s %-30s %-10s %s\n", | ||
968 | "clock-name", "parent-name", "rate", "use-count"); | ||
969 | |||
970 | list_for_each_entry(c, &clocks, node) { | ||
971 | pa = c->parent; | ||
972 | seq_printf(s, "%-30s %-30s %-10lu %d\n", | ||
973 | c->name, pa ? pa->name : "none", c->rate, | ||
974 | c->usecount); | ||
975 | } | ||
976 | mutex_unlock(&clocks_mutex); | ||
977 | |||
978 | return 0; | ||
979 | } | ||
980 | |||
981 | static int clk_dbg_open(struct inode *inode, struct file *file) | ||
982 | { | ||
983 | return single_open(file, clk_dbg_show_summary, inode->i_private); | ||
984 | } | ||
985 | |||
986 | static const struct file_operations debug_clock_fops = { | ||
987 | .open = clk_dbg_open, | ||
988 | .read = seq_read, | ||
989 | .llseek = seq_lseek, | ||
990 | .release = single_release, | ||
991 | }; | ||
992 | |||
993 | static int clk_debugfs_register_one(struct clk *c) | ||
994 | { | ||
995 | int err; | ||
996 | struct dentry *d; | ||
997 | struct clk *pa = c->parent; | ||
998 | |||
999 | d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root); | ||
1000 | if (!d) | ||
1001 | return -ENOMEM; | ||
1002 | c->dent = d; | ||
1003 | |||
1004 | d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount); | ||
1005 | if (!d) { | ||
1006 | err = -ENOMEM; | ||
1007 | goto err_out; | ||
1008 | } | ||
1009 | d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate); | ||
1010 | if (!d) { | ||
1011 | err = -ENOMEM; | ||
1012 | goto err_out; | ||
1013 | } | ||
1014 | d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags); | ||
1015 | if (!d) { | ||
1016 | err = -ENOMEM; | ||
1017 | goto err_out; | ||
1018 | } | ||
1019 | return 0; | ||
1020 | |||
1021 | err_out: | ||
1022 | debugfs_remove_recursive(c->dent); | ||
1023 | return err; | ||
1024 | } | ||
1025 | |||
1026 | static int clk_debugfs_register(struct clk *c) | ||
1027 | { | ||
1028 | int err; | ||
1029 | struct clk *pa = c->parent; | ||
1030 | |||
1031 | if (pa && !pa->dent) { | ||
1032 | err = clk_debugfs_register(pa); | ||
1033 | if (err) | ||
1034 | return err; | ||
1035 | } | ||
1036 | |||
1037 | if (!c->dent) { | ||
1038 | err = clk_debugfs_register_one(c); | ||
1039 | if (err) | ||
1040 | return err; | ||
1041 | } | ||
1042 | return 0; | ||
1043 | } | ||
1044 | |||
1045 | static int __init clk_debugfs_init(void) | ||
1046 | { | ||
1047 | struct clk *c; | ||
1048 | struct dentry *d; | ||
1049 | int err; | ||
1050 | |||
1051 | d = debugfs_create_dir("clock", NULL); | ||
1052 | if (!d) | ||
1053 | return -ENOMEM; | ||
1054 | clk_debugfs_root = d; | ||
1055 | |||
1056 | list_for_each_entry(c, &clocks, node) { | ||
1057 | err = clk_debugfs_register(c); | ||
1058 | if (err) | ||
1059 | goto err_out; | ||
1060 | } | ||
1061 | |||
1062 | d = debugfs_create_file("summary", S_IRUGO, | ||
1063 | d, NULL, &debug_clock_fops); | ||
1064 | if (!d) | ||
1065 | return -ENOMEM; | ||
1066 | |||
1067 | return 0; | ||
1068 | err_out: | ||
1069 | debugfs_remove_recursive(clk_debugfs_root); | ||
1070 | return err; | ||
1071 | } | ||
1072 | late_initcall(clk_debugfs_init); | ||
1073 | |||
1074 | #endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */ | ||
1075 | |||
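With the private clock list, the clockfw_lock-protected clk_enable()/clk_disable()/clk_set_rate() wrappers, and the old debugfs code all removed above, clock consumers now go through the common clock framework directly. A minimal hedged sketch of the consumer-side pattern that replaces those wrappers (the device pointer and the "fck" con_id are made up for illustration):

/* Illustrative consumer usage under the common clock framework. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_and_enable(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = clk_get(dev, "fck");	/* "fck" is an illustrative con_id */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* CCF requires prepare before enable */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* ... use the clock ... */

	clk_disable_unprepare(clk);
	clk_put(clk);
	return 0;
}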