author | Stephen Boyd <sboyd@codeaurora.org> | 2015-04-30 17:43:22 -0400
---|---|---
committer | Stephen Boyd <sboyd@codeaurora.org> | 2015-04-30 19:28:39 -0400
commit | 4dff95dc9477a34de77d24c59dcf1dc593687fcf (patch) |
tree | 0a8bdf1a5ccdc423796e9976c56370956b9edb99 /drivers/clk/clk.c |
parent | 1f3e1983429d31ceada9a09197d79445c92a2901 (diff) |
clk: Remove forward declared function prototypes
Move the code around so that we don't need to declare function
prototypes at the start of the file. Simplify
clk_core_is_prepared() and clk_core_is_enabled() too to make the
diff easier to read.
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
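For quick reference, the simplification called out in the commit message replaces the local `ret`/`goto`/`!!ret` pattern with a direct return, relying on the implicit conversion to bool; `clk_core_is_enabled()` gets the same treatment. Both versions below are lifted from the hunks that follow:

```c
/* Before: forward-declared near the top of clk.c, with a goto-based fallback */
static bool clk_core_is_prepared(struct clk_core *core)
{
	int ret;

	if (!core)
		return false;

	/*
	 * .is_prepared is optional for clocks that can prepare
	 * fall back to software usage counter if it is missing
	 */
	if (!core->ops->is_prepared) {
		ret = core->prepare_count ? 1 : 0;
		goto out;
	}

	ret = core->ops->is_prepared(core->hw);
out:
	return !!ret;
}

/* After: moved above its first user; the bool return type does the !! for us */
static bool clk_core_is_prepared(struct clk_core *core)
{
	/*
	 * .is_prepared is optional for clocks that can prepare
	 * fall back to software usage counter if it is missing
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	return core->ops->is_prepared(core->hw);
}
```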
Diffstat (limited to 'drivers/clk/clk.c')
-rw-r--r-- | drivers/clk/clk.c | 811
1 file changed, 393 insertions(+), 418 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 3e58b7453076..0001b91f2b6e 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -37,13 +37,6 @@ static HLIST_HEAD(clk_root_list);
37 | static HLIST_HEAD(clk_orphan_list); | 37 | static HLIST_HEAD(clk_orphan_list); |
38 | static LIST_HEAD(clk_notifier_list); | 38 | static LIST_HEAD(clk_notifier_list); |
39 | 39 | ||
40 | static long clk_core_get_accuracy(struct clk_core *core); | ||
41 | static unsigned long clk_core_get_rate(struct clk_core *core); | ||
42 | static int clk_core_get_phase(struct clk_core *core); | ||
43 | static bool clk_core_is_prepared(struct clk_core *core); | ||
44 | static bool clk_core_is_enabled(struct clk_core *core); | ||
45 | static struct clk_core *clk_core_lookup(const char *name); | ||
46 | |||
47 | /*** private data structures ***/ | 40 | /*** private data structures ***/ |
48 | 41 | ||
49 | struct clk_core { | 42 | struct clk_core { |
@@ -145,338 +138,29 @@ static void clk_enable_unlock(unsigned long flags)
145 | spin_unlock_irqrestore(&enable_lock, flags); | 138 | spin_unlock_irqrestore(&enable_lock, flags); |
146 | } | 139 | } |
147 | 140 | ||
148 | /*** debugfs support ***/ | 141 | static bool clk_core_is_prepared(struct clk_core *core) |
149 | |||
150 | #ifdef CONFIG_DEBUG_FS | ||
151 | #include <linux/debugfs.h> | ||
152 | |||
153 | static struct dentry *rootdir; | ||
154 | static int inited = 0; | ||
155 | static DEFINE_MUTEX(clk_debug_lock); | ||
156 | static HLIST_HEAD(clk_debug_list); | ||
157 | |||
158 | static struct hlist_head *all_lists[] = { | ||
159 | &clk_root_list, | ||
160 | &clk_orphan_list, | ||
161 | NULL, | ||
162 | }; | ||
163 | |||
164 | static struct hlist_head *orphan_list[] = { | ||
165 | &clk_orphan_list, | ||
166 | NULL, | ||
167 | }; | ||
168 | |||
169 | static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, | ||
170 | int level) | ||
171 | { | ||
172 | if (!c) | ||
173 | return; | ||
174 | |||
175 | seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n", | ||
176 | level * 3 + 1, "", | ||
177 | 30 - level * 3, c->name, | ||
178 | c->enable_count, c->prepare_count, clk_core_get_rate(c), | ||
179 | clk_core_get_accuracy(c), clk_core_get_phase(c)); | ||
180 | } | ||
181 | |||
182 | static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, | ||
183 | int level) | ||
184 | { | ||
185 | struct clk_core *child; | ||
186 | |||
187 | if (!c) | ||
188 | return; | ||
189 | |||
190 | clk_summary_show_one(s, c, level); | ||
191 | |||
192 | hlist_for_each_entry(child, &c->children, child_node) | ||
193 | clk_summary_show_subtree(s, child, level + 1); | ||
194 | } | ||
195 | |||
196 | static int clk_summary_show(struct seq_file *s, void *data) | ||
197 | { | ||
198 | struct clk_core *c; | ||
199 | struct hlist_head **lists = (struct hlist_head **)s->private; | ||
200 | |||
201 | seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n"); | ||
202 | seq_puts(s, "----------------------------------------------------------------------------------------\n"); | ||
203 | |||
204 | clk_prepare_lock(); | ||
205 | |||
206 | for (; *lists; lists++) | ||
207 | hlist_for_each_entry(c, *lists, child_node) | ||
208 | clk_summary_show_subtree(s, c, 0); | ||
209 | |||
210 | clk_prepare_unlock(); | ||
211 | |||
212 | return 0; | ||
213 | } | ||
214 | |||
215 | |||
216 | static int clk_summary_open(struct inode *inode, struct file *file) | ||
217 | { | ||
218 | return single_open(file, clk_summary_show, inode->i_private); | ||
219 | } | ||
220 | |||
221 | static const struct file_operations clk_summary_fops = { | ||
222 | .open = clk_summary_open, | ||
223 | .read = seq_read, | ||
224 | .llseek = seq_lseek, | ||
225 | .release = single_release, | ||
226 | }; | ||
227 | |||
228 | static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) | ||
229 | { | ||
230 | if (!c) | ||
231 | return; | ||
232 | |||
233 | seq_printf(s, "\"%s\": { ", c->name); | ||
234 | seq_printf(s, "\"enable_count\": %d,", c->enable_count); | ||
235 | seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); | ||
236 | seq_printf(s, "\"rate\": %lu", clk_core_get_rate(c)); | ||
237 | seq_printf(s, "\"accuracy\": %lu", clk_core_get_accuracy(c)); | ||
238 | seq_printf(s, "\"phase\": %d", clk_core_get_phase(c)); | ||
239 | } | ||
240 | |||
241 | static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) | ||
242 | { | ||
243 | struct clk_core *child; | ||
244 | |||
245 | if (!c) | ||
246 | return; | ||
247 | |||
248 | clk_dump_one(s, c, level); | ||
249 | |||
250 | hlist_for_each_entry(child, &c->children, child_node) { | ||
251 | seq_printf(s, ","); | ||
252 | clk_dump_subtree(s, child, level + 1); | ||
253 | } | ||
254 | |||
255 | seq_printf(s, "}"); | ||
256 | } | ||
257 | |||
258 | static int clk_dump(struct seq_file *s, void *data) | ||
259 | { | ||
260 | struct clk_core *c; | ||
261 | bool first_node = true; | ||
262 | struct hlist_head **lists = (struct hlist_head **)s->private; | ||
263 | |||
264 | seq_printf(s, "{"); | ||
265 | |||
266 | clk_prepare_lock(); | ||
267 | |||
268 | for (; *lists; lists++) { | ||
269 | hlist_for_each_entry(c, *lists, child_node) { | ||
270 | if (!first_node) | ||
271 | seq_puts(s, ","); | ||
272 | first_node = false; | ||
273 | clk_dump_subtree(s, c, 0); | ||
274 | } | ||
275 | } | ||
276 | |||
277 | clk_prepare_unlock(); | ||
278 | |||
279 | seq_printf(s, "}"); | ||
280 | return 0; | ||
281 | } | ||
282 | |||
283 | |||
284 | static int clk_dump_open(struct inode *inode, struct file *file) | ||
285 | { | ||
286 | return single_open(file, clk_dump, inode->i_private); | ||
287 | } | ||
288 | |||
289 | static const struct file_operations clk_dump_fops = { | ||
290 | .open = clk_dump_open, | ||
291 | .read = seq_read, | ||
292 | .llseek = seq_lseek, | ||
293 | .release = single_release, | ||
294 | }; | ||
295 | |||
296 | static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) | ||
297 | { | ||
298 | struct dentry *d; | ||
299 | int ret = -ENOMEM; | ||
300 | |||
301 | if (!core || !pdentry) { | ||
302 | ret = -EINVAL; | ||
303 | goto out; | ||
304 | } | ||
305 | |||
306 | d = debugfs_create_dir(core->name, pdentry); | ||
307 | if (!d) | ||
308 | goto out; | ||
309 | |||
310 | core->dentry = d; | ||
311 | |||
312 | d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry, | ||
313 | (u32 *)&core->rate); | ||
314 | if (!d) | ||
315 | goto err_out; | ||
316 | |||
317 | d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry, | ||
318 | (u32 *)&core->accuracy); | ||
319 | if (!d) | ||
320 | goto err_out; | ||
321 | |||
322 | d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry, | ||
323 | (u32 *)&core->phase); | ||
324 | if (!d) | ||
325 | goto err_out; | ||
326 | |||
327 | d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry, | ||
328 | (u32 *)&core->flags); | ||
329 | if (!d) | ||
330 | goto err_out; | ||
331 | |||
332 | d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry, | ||
333 | (u32 *)&core->prepare_count); | ||
334 | if (!d) | ||
335 | goto err_out; | ||
336 | |||
337 | d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry, | ||
338 | (u32 *)&core->enable_count); | ||
339 | if (!d) | ||
340 | goto err_out; | ||
341 | |||
342 | d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry, | ||
343 | (u32 *)&core->notifier_count); | ||
344 | if (!d) | ||
345 | goto err_out; | ||
346 | |||
347 | if (core->ops->debug_init) { | ||
348 | ret = core->ops->debug_init(core->hw, core->dentry); | ||
349 | if (ret) | ||
350 | goto err_out; | ||
351 | } | ||
352 | |||
353 | ret = 0; | ||
354 | goto out; | ||
355 | |||
356 | err_out: | ||
357 | debugfs_remove_recursive(core->dentry); | ||
358 | core->dentry = NULL; | ||
359 | out: | ||
360 | return ret; | ||
361 | } | ||
362 | |||
363 | /** | ||
364 | * clk_debug_register - add a clk node to the debugfs clk tree | ||
365 | * @core: the clk being added to the debugfs clk tree | ||
366 | * | ||
367 | * Dynamically adds a clk to the debugfs clk tree if debugfs has been | ||
368 | * initialized. Otherwise it bails out early since the debugfs clk tree | ||
369 | * will be created lazily by clk_debug_init as part of a late_initcall. | ||
370 | */ | ||
371 | static int clk_debug_register(struct clk_core *core) | ||
372 | { | ||
373 | int ret = 0; | ||
374 | |||
375 | mutex_lock(&clk_debug_lock); | ||
376 | hlist_add_head(&core->debug_node, &clk_debug_list); | ||
377 | |||
378 | if (!inited) | ||
379 | goto unlock; | ||
380 | |||
381 | ret = clk_debug_create_one(core, rootdir); | ||
382 | unlock: | ||
383 | mutex_unlock(&clk_debug_lock); | ||
384 | |||
385 | return ret; | ||
386 | } | ||
387 | |||
388 | /** | ||
389 | * clk_debug_unregister - remove a clk node from the debugfs clk tree | ||
390 | * @core: the clk being removed from the debugfs clk tree | ||
391 | * | ||
392 | * Dynamically removes a clk and all it's children clk nodes from the | ||
393 | * debugfs clk tree if clk->dentry points to debugfs created by | ||
394 | * clk_debug_register in __clk_init. | ||
395 | */ | ||
396 | static void clk_debug_unregister(struct clk_core *core) | ||
397 | { | ||
398 | mutex_lock(&clk_debug_lock); | ||
399 | hlist_del_init(&core->debug_node); | ||
400 | debugfs_remove_recursive(core->dentry); | ||
401 | core->dentry = NULL; | ||
402 | mutex_unlock(&clk_debug_lock); | ||
403 | } | ||
404 | |||
405 | struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode, | ||
406 | void *data, const struct file_operations *fops) | ||
407 | { | 142 | { |
408 | struct dentry *d = NULL; | 143 | /* |
409 | 144 | * .is_prepared is optional for clocks that can prepare | |
410 | if (hw->core->dentry) | 145 | * fall back to software usage counter if it is missing |
411 | d = debugfs_create_file(name, mode, hw->core->dentry, data, | 146 | */ |
412 | fops); | 147 | if (!core->ops->is_prepared) |
148 | return core->prepare_count; | ||
413 | 149 | ||
414 | return d; | 150 | return core->ops->is_prepared(core->hw); |
415 | } | 151 | } |
416 | EXPORT_SYMBOL_GPL(clk_debugfs_add_file); | ||
417 | 152 | ||
418 | /** | 153 | static bool clk_core_is_enabled(struct clk_core *core) |
419 | * clk_debug_init - lazily create the debugfs clk tree visualization | ||
420 | * | ||
421 | * clks are often initialized very early during boot before memory can | ||
422 | * be dynamically allocated and well before debugfs is setup. | ||
423 | * clk_debug_init walks the clk tree hierarchy while holding | ||
424 | * prepare_lock and creates the topology as part of a late_initcall, | ||
425 | * thus insuring that clks initialized very early will still be | ||
426 | * represented in the debugfs clk tree. This function should only be | ||
427 | * called once at boot-time, and all other clks added dynamically will | ||
428 | * be done so with clk_debug_register. | ||
429 | */ | ||
430 | static int __init clk_debug_init(void) | ||
431 | { | 154 | { |
432 | struct clk_core *core; | 155 | /* |
433 | struct dentry *d; | 156 | * .is_enabled is only mandatory for clocks that gate |
434 | 157 | * fall back to software usage counter if .is_enabled is missing | |
435 | rootdir = debugfs_create_dir("clk", NULL); | 158 | */ |
436 | 159 | if (!core->ops->is_enabled) | |
437 | if (!rootdir) | 160 | return core->enable_count; |
438 | return -ENOMEM; | ||
439 | |||
440 | d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists, | ||
441 | &clk_summary_fops); | ||
442 | if (!d) | ||
443 | return -ENOMEM; | ||
444 | |||
445 | d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists, | ||
446 | &clk_dump_fops); | ||
447 | if (!d) | ||
448 | return -ENOMEM; | ||
449 | |||
450 | d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir, | ||
451 | &orphan_list, &clk_summary_fops); | ||
452 | if (!d) | ||
453 | return -ENOMEM; | ||
454 | |||
455 | d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir, | ||
456 | &orphan_list, &clk_dump_fops); | ||
457 | if (!d) | ||
458 | return -ENOMEM; | ||
459 | |||
460 | mutex_lock(&clk_debug_lock); | ||
461 | hlist_for_each_entry(core, &clk_debug_list, debug_node) | ||
462 | clk_debug_create_one(core, rootdir); | ||
463 | |||
464 | inited = 1; | ||
465 | mutex_unlock(&clk_debug_lock); | ||
466 | 161 | ||
467 | return 0; | 162 | return core->ops->is_enabled(core->hw); |
468 | } | ||
469 | late_initcall(clk_debug_init); | ||
470 | #else | ||
471 | static inline int clk_debug_register(struct clk_core *core) { return 0; } | ||
472 | static inline void clk_debug_reparent(struct clk_core *core, | ||
473 | struct clk_core *new_parent) | ||
474 | { | ||
475 | } | ||
476 | static inline void clk_debug_unregister(struct clk_core *core) | ||
477 | { | ||
478 | } | 163 | } |
479 | #endif | ||
480 | 164 | ||
481 | /* caller must hold prepare_lock */ | 165 | /* caller must hold prepare_lock */ |
482 | static void clk_unprepare_unused_subtree(struct clk_core *core) | 166 | static void clk_unprepare_unused_subtree(struct clk_core *core) |
@@ -608,6 +292,49 @@ struct clk *__clk_get_parent(struct clk *clk)
608 | } | 292 | } |
609 | EXPORT_SYMBOL_GPL(__clk_get_parent); | 293 | EXPORT_SYMBOL_GPL(__clk_get_parent); |
610 | 294 | ||
295 | static struct clk_core *__clk_lookup_subtree(const char *name, | ||
296 | struct clk_core *core) | ||
297 | { | ||
298 | struct clk_core *child; | ||
299 | struct clk_core *ret; | ||
300 | |||
301 | if (!strcmp(core->name, name)) | ||
302 | return core; | ||
303 | |||
304 | hlist_for_each_entry(child, &core->children, child_node) { | ||
305 | ret = __clk_lookup_subtree(name, child); | ||
306 | if (ret) | ||
307 | return ret; | ||
308 | } | ||
309 | |||
310 | return NULL; | ||
311 | } | ||
312 | |||
313 | static struct clk_core *clk_core_lookup(const char *name) | ||
314 | { | ||
315 | struct clk_core *root_clk; | ||
316 | struct clk_core *ret; | ||
317 | |||
318 | if (!name) | ||
319 | return NULL; | ||
320 | |||
321 | /* search the 'proper' clk tree first */ | ||
322 | hlist_for_each_entry(root_clk, &clk_root_list, child_node) { | ||
323 | ret = __clk_lookup_subtree(name, root_clk); | ||
324 | if (ret) | ||
325 | return ret; | ||
326 | } | ||
327 | |||
328 | /* if not found, then search the orphan tree */ | ||
329 | hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) { | ||
330 | ret = __clk_lookup_subtree(name, root_clk); | ||
331 | if (ret) | ||
332 | return ret; | ||
333 | } | ||
334 | |||
335 | return NULL; | ||
336 | } | ||
337 | |||
611 | static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core, | 338 | static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core, |
612 | u8 index) | 339 | u8 index) |
613 | { | 340 | { |
@@ -684,27 +411,6 @@ unsigned long __clk_get_flags(struct clk *clk)
684 | } | 411 | } |
685 | EXPORT_SYMBOL_GPL(__clk_get_flags); | 412 | EXPORT_SYMBOL_GPL(__clk_get_flags); |
686 | 413 | ||
687 | static bool clk_core_is_prepared(struct clk_core *core) | ||
688 | { | ||
689 | int ret; | ||
690 | |||
691 | if (!core) | ||
692 | return false; | ||
693 | |||
694 | /* | ||
695 | * .is_prepared is optional for clocks that can prepare | ||
696 | * fall back to software usage counter if it is missing | ||
697 | */ | ||
698 | if (!core->ops->is_prepared) { | ||
699 | ret = core->prepare_count ? 1 : 0; | ||
700 | goto out; | ||
701 | } | ||
702 | |||
703 | ret = core->ops->is_prepared(core->hw); | ||
704 | out: | ||
705 | return !!ret; | ||
706 | } | ||
707 | |||
708 | bool __clk_is_prepared(struct clk *clk) | 414 | bool __clk_is_prepared(struct clk *clk) |
709 | { | 415 | { |
710 | if (!clk) | 416 | if (!clk) |
@@ -713,27 +419,6 @@ bool __clk_is_prepared(struct clk *clk)
713 | return clk_core_is_prepared(clk->core); | 419 | return clk_core_is_prepared(clk->core); |
714 | } | 420 | } |
715 | 421 | ||
716 | static bool clk_core_is_enabled(struct clk_core *core) | ||
717 | { | ||
718 | int ret; | ||
719 | |||
720 | if (!core) | ||
721 | return false; | ||
722 | |||
723 | /* | ||
724 | * .is_enabled is only mandatory for clocks that gate | ||
725 | * fall back to software usage counter if .is_enabled is missing | ||
726 | */ | ||
727 | if (!core->ops->is_enabled) { | ||
728 | ret = core->enable_count ? 1 : 0; | ||
729 | goto out; | ||
730 | } | ||
731 | |||
732 | ret = core->ops->is_enabled(core->hw); | ||
733 | out: | ||
734 | return !!ret; | ||
735 | } | ||
736 | |||
737 | bool __clk_is_enabled(struct clk *clk) | 422 | bool __clk_is_enabled(struct clk *clk) |
738 | { | 423 | { |
739 | if (!clk) | 424 | if (!clk) |
@@ -743,49 +428,6 @@ bool __clk_is_enabled(struct clk *clk)
743 | } | 428 | } |
744 | EXPORT_SYMBOL_GPL(__clk_is_enabled); | 429 | EXPORT_SYMBOL_GPL(__clk_is_enabled); |
745 | 430 | ||
746 | static struct clk_core *__clk_lookup_subtree(const char *name, | ||
747 | struct clk_core *core) | ||
748 | { | ||
749 | struct clk_core *child; | ||
750 | struct clk_core *ret; | ||
751 | |||
752 | if (!strcmp(core->name, name)) | ||
753 | return core; | ||
754 | |||
755 | hlist_for_each_entry(child, &core->children, child_node) { | ||
756 | ret = __clk_lookup_subtree(name, child); | ||
757 | if (ret) | ||
758 | return ret; | ||
759 | } | ||
760 | |||
761 | return NULL; | ||
762 | } | ||
763 | |||
764 | static struct clk_core *clk_core_lookup(const char *name) | ||
765 | { | ||
766 | struct clk_core *root_clk; | ||
767 | struct clk_core *ret; | ||
768 | |||
769 | if (!name) | ||
770 | return NULL; | ||
771 | |||
772 | /* search the 'proper' clk tree first */ | ||
773 | hlist_for_each_entry(root_clk, &clk_root_list, child_node) { | ||
774 | ret = __clk_lookup_subtree(name, root_clk); | ||
775 | if (ret) | ||
776 | return ret; | ||
777 | } | ||
778 | |||
779 | /* if not found, then search the orphan tree */ | ||
780 | hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) { | ||
781 | ret = __clk_lookup_subtree(name, root_clk); | ||
782 | if (ret) | ||
783 | return ret; | ||
784 | } | ||
785 | |||
786 | return NULL; | ||
787 | } | ||
788 | |||
789 | static bool mux_is_better_rate(unsigned long rate, unsigned long now, | 431 | static bool mux_is_better_rate(unsigned long rate, unsigned long now, |
790 | unsigned long best, unsigned long flags) | 432 | unsigned long best, unsigned long flags) |
791 | { | 433 | { |
@@ -2190,7 +1832,6 @@ static int clk_core_get_phase(struct clk_core *core)
2190 | 1832 | ||
2191 | return ret; | 1833 | return ret; |
2192 | } | 1834 | } |
2193 | EXPORT_SYMBOL_GPL(clk_get_phase); | ||
2194 | 1835 | ||
2195 | /** | 1836 | /** |
2196 | * clk_get_phase - return the phase shift of a clock signal | 1837 | * clk_get_phase - return the phase shift of a clock signal |
@@ -2206,6 +1847,7 @@ int clk_get_phase(struct clk *clk)
2206 | 1847 | ||
2207 | return clk_core_get_phase(clk->core); | 1848 | return clk_core_get_phase(clk->core); |
2208 | } | 1849 | } |
1850 | EXPORT_SYMBOL_GPL(clk_get_phase); | ||
2209 | 1851 | ||
2210 | /** | 1852 | /** |
2211 | * clk_is_match - check if two clk's point to the same hardware clock | 1853 | * clk_is_match - check if two clk's point to the same hardware clock |
@@ -2233,6 +1875,339 @@ bool clk_is_match(const struct clk *p, const struct clk *q)
2233 | } | 1875 | } |
2234 | EXPORT_SYMBOL_GPL(clk_is_match); | 1876 | EXPORT_SYMBOL_GPL(clk_is_match); |
2235 | 1877 | ||
1878 | /*** debugfs support ***/ | ||
1879 | |||
1880 | #ifdef CONFIG_DEBUG_FS | ||
1881 | #include <linux/debugfs.h> | ||
1882 | |||
1883 | static struct dentry *rootdir; | ||
1884 | static int inited = 0; | ||
1885 | static DEFINE_MUTEX(clk_debug_lock); | ||
1886 | static HLIST_HEAD(clk_debug_list); | ||
1887 | |||
1888 | static struct hlist_head *all_lists[] = { | ||
1889 | &clk_root_list, | ||
1890 | &clk_orphan_list, | ||
1891 | NULL, | ||
1892 | }; | ||
1893 | |||
1894 | static struct hlist_head *orphan_list[] = { | ||
1895 | &clk_orphan_list, | ||
1896 | NULL, | ||
1897 | }; | ||
1898 | |||
1899 | static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, | ||
1900 | int level) | ||
1901 | { | ||
1902 | if (!c) | ||
1903 | return; | ||
1904 | |||
1905 | seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n", | ||
1906 | level * 3 + 1, "", | ||
1907 | 30 - level * 3, c->name, | ||
1908 | c->enable_count, c->prepare_count, clk_core_get_rate(c), | ||
1909 | clk_core_get_accuracy(c), clk_core_get_phase(c)); | ||
1910 | } | ||
1911 | |||
1912 | static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, | ||
1913 | int level) | ||
1914 | { | ||
1915 | struct clk_core *child; | ||
1916 | |||
1917 | if (!c) | ||
1918 | return; | ||
1919 | |||
1920 | clk_summary_show_one(s, c, level); | ||
1921 | |||
1922 | hlist_for_each_entry(child, &c->children, child_node) | ||
1923 | clk_summary_show_subtree(s, child, level + 1); | ||
1924 | } | ||
1925 | |||
1926 | static int clk_summary_show(struct seq_file *s, void *data) | ||
1927 | { | ||
1928 | struct clk_core *c; | ||
1929 | struct hlist_head **lists = (struct hlist_head **)s->private; | ||
1930 | |||
1931 | seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n"); | ||
1932 | seq_puts(s, "----------------------------------------------------------------------------------------\n"); | ||
1933 | |||
1934 | clk_prepare_lock(); | ||
1935 | |||
1936 | for (; *lists; lists++) | ||
1937 | hlist_for_each_entry(c, *lists, child_node) | ||
1938 | clk_summary_show_subtree(s, c, 0); | ||
1939 | |||
1940 | clk_prepare_unlock(); | ||
1941 | |||
1942 | return 0; | ||
1943 | } | ||
1944 | |||
1945 | |||
1946 | static int clk_summary_open(struct inode *inode, struct file *file) | ||
1947 | { | ||
1948 | return single_open(file, clk_summary_show, inode->i_private); | ||
1949 | } | ||
1950 | |||
1951 | static const struct file_operations clk_summary_fops = { | ||
1952 | .open = clk_summary_open, | ||
1953 | .read = seq_read, | ||
1954 | .llseek = seq_lseek, | ||
1955 | .release = single_release, | ||
1956 | }; | ||
1957 | |||
1958 | static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) | ||
1959 | { | ||
1960 | if (!c) | ||
1961 | return; | ||
1962 | |||
1963 | seq_printf(s, "\"%s\": { ", c->name); | ||
1964 | seq_printf(s, "\"enable_count\": %d,", c->enable_count); | ||
1965 | seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); | ||
1966 | seq_printf(s, "\"rate\": %lu", clk_core_get_rate(c)); | ||
1967 | seq_printf(s, "\"accuracy\": %lu", clk_core_get_accuracy(c)); | ||
1968 | seq_printf(s, "\"phase\": %d", clk_core_get_phase(c)); | ||
1969 | } | ||
1970 | |||
1971 | static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) | ||
1972 | { | ||
1973 | struct clk_core *child; | ||
1974 | |||
1975 | if (!c) | ||
1976 | return; | ||
1977 | |||
1978 | clk_dump_one(s, c, level); | ||
1979 | |||
1980 | hlist_for_each_entry(child, &c->children, child_node) { | ||
1981 | seq_printf(s, ","); | ||
1982 | clk_dump_subtree(s, child, level + 1); | ||
1983 | } | ||
1984 | |||
1985 | seq_printf(s, "}"); | ||
1986 | } | ||
1987 | |||
1988 | static int clk_dump(struct seq_file *s, void *data) | ||
1989 | { | ||
1990 | struct clk_core *c; | ||
1991 | bool first_node = true; | ||
1992 | struct hlist_head **lists = (struct hlist_head **)s->private; | ||
1993 | |||
1994 | seq_printf(s, "{"); | ||
1995 | |||
1996 | clk_prepare_lock(); | ||
1997 | |||
1998 | for (; *lists; lists++) { | ||
1999 | hlist_for_each_entry(c, *lists, child_node) { | ||
2000 | if (!first_node) | ||
2001 | seq_puts(s, ","); | ||
2002 | first_node = false; | ||
2003 | clk_dump_subtree(s, c, 0); | ||
2004 | } | ||
2005 | } | ||
2006 | |||
2007 | clk_prepare_unlock(); | ||
2008 | |||
2009 | seq_printf(s, "}"); | ||
2010 | return 0; | ||
2011 | } | ||
2012 | |||
2013 | |||
2014 | static int clk_dump_open(struct inode *inode, struct file *file) | ||
2015 | { | ||
2016 | return single_open(file, clk_dump, inode->i_private); | ||
2017 | } | ||
2018 | |||
2019 | static const struct file_operations clk_dump_fops = { | ||
2020 | .open = clk_dump_open, | ||
2021 | .read = seq_read, | ||
2022 | .llseek = seq_lseek, | ||
2023 | .release = single_release, | ||
2024 | }; | ||
2025 | |||
2026 | static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) | ||
2027 | { | ||
2028 | struct dentry *d; | ||
2029 | int ret = -ENOMEM; | ||
2030 | |||
2031 | if (!core || !pdentry) { | ||
2032 | ret = -EINVAL; | ||
2033 | goto out; | ||
2034 | } | ||
2035 | |||
2036 | d = debugfs_create_dir(core->name, pdentry); | ||
2037 | if (!d) | ||
2038 | goto out; | ||
2039 | |||
2040 | core->dentry = d; | ||
2041 | |||
2042 | d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry, | ||
2043 | (u32 *)&core->rate); | ||
2044 | if (!d) | ||
2045 | goto err_out; | ||
2046 | |||
2047 | d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry, | ||
2048 | (u32 *)&core->accuracy); | ||
2049 | if (!d) | ||
2050 | goto err_out; | ||
2051 | |||
2052 | d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry, | ||
2053 | (u32 *)&core->phase); | ||
2054 | if (!d) | ||
2055 | goto err_out; | ||
2056 | |||
2057 | d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry, | ||
2058 | (u32 *)&core->flags); | ||
2059 | if (!d) | ||
2060 | goto err_out; | ||
2061 | |||
2062 | d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry, | ||
2063 | (u32 *)&core->prepare_count); | ||
2064 | if (!d) | ||
2065 | goto err_out; | ||
2066 | |||
2067 | d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry, | ||
2068 | (u32 *)&core->enable_count); | ||
2069 | if (!d) | ||
2070 | goto err_out; | ||
2071 | |||
2072 | d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry, | ||
2073 | (u32 *)&core->notifier_count); | ||
2074 | if (!d) | ||
2075 | goto err_out; | ||
2076 | |||
2077 | if (core->ops->debug_init) { | ||
2078 | ret = core->ops->debug_init(core->hw, core->dentry); | ||
2079 | if (ret) | ||
2080 | goto err_out; | ||
2081 | } | ||
2082 | |||
2083 | ret = 0; | ||
2084 | goto out; | ||
2085 | |||
2086 | err_out: | ||
2087 | debugfs_remove_recursive(core->dentry); | ||
2088 | core->dentry = NULL; | ||
2089 | out: | ||
2090 | return ret; | ||
2091 | } | ||
2092 | |||
2093 | /** | ||
2094 | * clk_debug_register - add a clk node to the debugfs clk tree | ||
2095 | * @core: the clk being added to the debugfs clk tree | ||
2096 | * | ||
2097 | * Dynamically adds a clk to the debugfs clk tree if debugfs has been | ||
2098 | * initialized. Otherwise it bails out early since the debugfs clk tree | ||
2099 | * will be created lazily by clk_debug_init as part of a late_initcall. | ||
2100 | */ | ||
2101 | static int clk_debug_register(struct clk_core *core) | ||
2102 | { | ||
2103 | int ret = 0; | ||
2104 | |||
2105 | mutex_lock(&clk_debug_lock); | ||
2106 | hlist_add_head(&core->debug_node, &clk_debug_list); | ||
2107 | |||
2108 | if (!inited) | ||
2109 | goto unlock; | ||
2110 | |||
2111 | ret = clk_debug_create_one(core, rootdir); | ||
2112 | unlock: | ||
2113 | mutex_unlock(&clk_debug_lock); | ||
2114 | |||
2115 | return ret; | ||
2116 | } | ||
2117 | |||
2118 | /** | ||
2119 | * clk_debug_unregister - remove a clk node from the debugfs clk tree | ||
2120 | * @core: the clk being removed from the debugfs clk tree | ||
2121 | * | ||
2122 | * Dynamically removes a clk and all it's children clk nodes from the | ||
2123 | * debugfs clk tree if clk->dentry points to debugfs created by | ||
2124 | * clk_debug_register in __clk_init. | ||
2125 | */ | ||
2126 | static void clk_debug_unregister(struct clk_core *core) | ||
2127 | { | ||
2128 | mutex_lock(&clk_debug_lock); | ||
2129 | hlist_del_init(&core->debug_node); | ||
2130 | debugfs_remove_recursive(core->dentry); | ||
2131 | core->dentry = NULL; | ||
2132 | mutex_unlock(&clk_debug_lock); | ||
2133 | } | ||
2134 | |||
2135 | struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode, | ||
2136 | void *data, const struct file_operations *fops) | ||
2137 | { | ||
2138 | struct dentry *d = NULL; | ||
2139 | |||
2140 | if (hw->core->dentry) | ||
2141 | d = debugfs_create_file(name, mode, hw->core->dentry, data, | ||
2142 | fops); | ||
2143 | |||
2144 | return d; | ||
2145 | } | ||
2146 | EXPORT_SYMBOL_GPL(clk_debugfs_add_file); | ||
2147 | |||
2148 | /** | ||
2149 | * clk_debug_init - lazily create the debugfs clk tree visualization | ||
2150 | * | ||
2151 | * clks are often initialized very early during boot before memory can | ||
2152 | * be dynamically allocated and well before debugfs is setup. | ||
2153 | * clk_debug_init walks the clk tree hierarchy while holding | ||
2154 | * prepare_lock and creates the topology as part of a late_initcall, | ||
2155 | * thus insuring that clks initialized very early will still be | ||
2156 | * represented in the debugfs clk tree. This function should only be | ||
2157 | * called once at boot-time, and all other clks added dynamically will | ||
2158 | * be done so with clk_debug_register. | ||
2159 | */ | ||
2160 | static int __init clk_debug_init(void) | ||
2161 | { | ||
2162 | struct clk_core *core; | ||
2163 | struct dentry *d; | ||
2164 | |||
2165 | rootdir = debugfs_create_dir("clk", NULL); | ||
2166 | |||
2167 | if (!rootdir) | ||
2168 | return -ENOMEM; | ||
2169 | |||
2170 | d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists, | ||
2171 | &clk_summary_fops); | ||
2172 | if (!d) | ||
2173 | return -ENOMEM; | ||
2174 | |||
2175 | d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists, | ||
2176 | &clk_dump_fops); | ||
2177 | if (!d) | ||
2178 | return -ENOMEM; | ||
2179 | |||
2180 | d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir, | ||
2181 | &orphan_list, &clk_summary_fops); | ||
2182 | if (!d) | ||
2183 | return -ENOMEM; | ||
2184 | |||
2185 | d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir, | ||
2186 | &orphan_list, &clk_dump_fops); | ||
2187 | if (!d) | ||
2188 | return -ENOMEM; | ||
2189 | |||
2190 | mutex_lock(&clk_debug_lock); | ||
2191 | hlist_for_each_entry(core, &clk_debug_list, debug_node) | ||
2192 | clk_debug_create_one(core, rootdir); | ||
2193 | |||
2194 | inited = 1; | ||
2195 | mutex_unlock(&clk_debug_lock); | ||
2196 | |||
2197 | return 0; | ||
2198 | } | ||
2199 | late_initcall(clk_debug_init); | ||
2200 | #else | ||
2201 | static inline int clk_debug_register(struct clk_core *core) { return 0; } | ||
2202 | static inline void clk_debug_reparent(struct clk_core *core, | ||
2203 | struct clk_core *new_parent) | ||
2204 | { | ||
2205 | } | ||
2206 | static inline void clk_debug_unregister(struct clk_core *core) | ||
2207 | { | ||
2208 | } | ||
2209 | #endif | ||
2210 | |||
2236 | /** | 2211 | /** |
2237 | * __clk_init - initialize the data structures in a struct clk | 2212 | * __clk_init - initialize the data structures in a struct clk |
2238 | * @dev: device initializing this clk, placeholder for now | 2213 | * @dev: device initializing this clk, placeholder for now |