Diffstat (limited to 'arch/arm/mach-tegra/clock.c')
 -rw-r--r--   arch/arm/mach-tegra/clock.c | 570
 1 file changed, 31 insertions(+), 539 deletions(-)
diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c
index 58f981c0819c..632133fc985b 100644
--- a/arch/arm/mach-tegra/clock.c
+++ b/arch/arm/mach-tegra/clock.c
@@ -1,6 +1,7 @@
 /*
  *
  * Copyright (C) 2010 Google, Inc.
+ * Copyright (c) 2012 NVIDIA CORPORATION. All rights reserved.
  *
  * Author:
  *	Colin Cross <ccross@google.com>
@@ -19,8 +20,6 @@
 #include <linux/kernel.h>
 #include <linux/clk.h>
 #include <linux/clkdev.h>
-#include <linux/debugfs.h>
-#include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/module.h>
@@ -36,321 +35,67 @@
 /*
  * Locking:
  *
- * Each struct clk has a spinlock.
- *
- * To avoid AB-BA locking problems, locks must always be traversed from child
- * clock to parent clock. For example, when enabling a clock, the clock's lock
- * is taken, and then clk_enable is called on the parent, which take's the
- * parent clock's lock. There is one exceptions to this ordering: When dumping
- * the clock tree through debugfs. In this case, clk_lock_all is called,
- * which attemps to iterate through the entire list of clocks and take every
- * clock lock. If any call to spin_trylock fails, all locked clocks are
- * unlocked, and the process is retried. When all the locks are held,
- * the only clock operation that can be called is clk_get_rate_all_locked.
- *
- * Within a single clock, no clock operation can call another clock operation
- * on itself, except for clk_get_rate_locked and clk_set_rate_locked. Any
- * clock operation can call any other clock operation on any of it's possible
- * parents.
- *
  * An additional mutex, clock_list_lock, is used to protect the list of all
  * clocks.
  *
- * The clock operations must lock internally to protect against
- * read-modify-write on registers that are shared by multiple clocks
  */
 static DEFINE_MUTEX(clock_list_lock);
 static LIST_HEAD(clocks);
 
-struct clk *tegra_get_clock_by_name(const char *name)
+void tegra_clk_add(struct clk *clk)
 {
-	struct clk *c;
-	struct clk *ret = NULL;
-	mutex_lock(&clock_list_lock);
-	list_for_each_entry(c, &clocks, node) {
-		if (strcmp(c->name, name) == 0) {
-			ret = c;
-			break;
-		}
-	}
-	mutex_unlock(&clock_list_lock);
-	return ret;
-}
-
-/* Must be called with c->spinlock held */
-static unsigned long clk_predict_rate_from_parent(struct clk *c, struct clk *p)
-{
-	u64 rate;
-
-	rate = clk_get_rate(p);
-
-	if (c->mul != 0 && c->div != 0) {
-		rate *= c->mul;
-		rate += c->div - 1; /* round up */
-		do_div(rate, c->div);
-	}
-
-	return rate;
-}
-
-/* Must be called with c->spinlock held */
-unsigned long clk_get_rate_locked(struct clk *c)
-{
-	unsigned long rate;
-
-	if (c->parent)
-		rate = clk_predict_rate_from_parent(c, c->parent);
-	else
-		rate = c->rate;
-
-	return rate;
-}
-
-unsigned long clk_get_rate(struct clk *c)
-{
-	unsigned long flags;
-	unsigned long rate;
-
-	spin_lock_irqsave(&c->spinlock, flags);
-
-	rate = clk_get_rate_locked(c);
-
-	spin_unlock_irqrestore(&c->spinlock, flags);
-
-	return rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-int clk_reparent(struct clk *c, struct clk *parent)
-{
-	c->parent = parent;
-	return 0;
-}
-
-void clk_init(struct clk *c)
-{
-	spin_lock_init(&c->spinlock);
-
-	if (c->ops && c->ops->init)
-		c->ops->init(c);
-
-	if (!c->ops || !c->ops->enable) {
-		c->refcnt++;
-		c->set = true;
-		if (c->parent)
-			c->state = c->parent->state;
-		else
-			c->state = ON;
-	}
+	struct clk_tegra *c = to_clk_tegra(__clk_get_hw(clk));
 
 	mutex_lock(&clock_list_lock);
 	list_add(&c->node, &clocks);
 	mutex_unlock(&clock_list_lock);
 }
 
-int clk_enable(struct clk *c)
-{
-	int ret = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&c->spinlock, flags);
-
-	if (c->refcnt == 0) {
-		if (c->parent) {
-			ret = clk_enable(c->parent);
-			if (ret)
-				goto out;
-		}
-
-		if (c->ops && c->ops->enable) {
-			ret = c->ops->enable(c);
-			if (ret) {
-				if (c->parent)
-					clk_disable(c->parent);
-				goto out;
-			}
-			c->state = ON;
-			c->set = true;
-		}
-	}
-	c->refcnt++;
-out:
-	spin_unlock_irqrestore(&c->spinlock, flags);
-	return ret;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *c)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&c->spinlock, flags);
-
-	if (c->refcnt == 0) {
-		WARN(1, "Attempting to disable clock %s with refcnt 0", c->name);
-		spin_unlock_irqrestore(&c->spinlock, flags);
-		return;
-	}
-	if (c->refcnt == 1) {
-		if (c->ops && c->ops->disable)
-			c->ops->disable(c);
-
-		if (c->parent)
-			clk_disable(c->parent);
-
-		c->state = OFF;
-	}
-	c->refcnt--;
-
-	spin_unlock_irqrestore(&c->spinlock, flags);
-}
-EXPORT_SYMBOL(clk_disable);
-
-int clk_set_parent(struct clk *c, struct clk *parent)
-{
-	int ret;
-	unsigned long flags;
-	unsigned long new_rate;
-	unsigned long old_rate;
-
-	spin_lock_irqsave(&c->spinlock, flags);
-
-	if (!c->ops || !c->ops->set_parent) {
-		ret = -ENOSYS;
-		goto out;
-	}
-
-	new_rate = clk_predict_rate_from_parent(c, parent);
-	old_rate = clk_get_rate_locked(c);
-
-	ret = c->ops->set_parent(c, parent);
-	if (ret)
-		goto out;
-
-out:
-	spin_unlock_irqrestore(&c->spinlock, flags);
-	return ret;
-}
-EXPORT_SYMBOL(clk_set_parent);
-
-struct clk *clk_get_parent(struct clk *c)
-{
-	return c->parent;
-}
-EXPORT_SYMBOL(clk_get_parent);
-
-int clk_set_rate_locked(struct clk *c, unsigned long rate)
-{
-	long new_rate;
-
-	if (!c->ops || !c->ops->set_rate)
-		return -ENOSYS;
-
-	if (rate > c->max_rate)
-		rate = c->max_rate;
-
-	if (c->ops && c->ops->round_rate) {
-		new_rate = c->ops->round_rate(c, rate);
-
-		if (new_rate < 0)
-			return new_rate;
-
-		rate = new_rate;
-	}
-
-	return c->ops->set_rate(c, rate);
-}
-
-int clk_set_rate(struct clk *c, unsigned long rate)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&c->spinlock, flags);
-
-	ret = clk_set_rate_locked(c, rate);
-
-	spin_unlock_irqrestore(&c->spinlock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-
-/* Must be called with clocks lock and all indvidual clock locks held */
-unsigned long clk_get_rate_all_locked(struct clk *c)
+struct clk *tegra_get_clock_by_name(const char *name)
 {
-	u64 rate;
-	int mul = 1;
-	int div = 1;
-	struct clk *p = c;
-
-	while (p) {
-		c = p;
-		if (c->mul != 0 && c->div != 0) {
-			mul *= c->mul;
-			div *= c->div;
+	struct clk_tegra *c;
+	struct clk *ret = NULL;
+	mutex_lock(&clock_list_lock);
+	list_for_each_entry(c, &clocks, node) {
+		if (strcmp(__clk_get_name(c->hw.clk), name) == 0) {
+			ret = c->hw.clk;
+			break;
 		}
-		p = c->parent;
 	}
-
-	rate = c->rate;
-	rate *= mul;
-	do_div(rate, div);
-
-	return rate;
-}
-
-long clk_round_rate(struct clk *c, unsigned long rate)
-{
-	unsigned long flags;
-	long ret;
-
-	spin_lock_irqsave(&c->spinlock, flags);
-
-	if (!c->ops || !c->ops->round_rate) {
-		ret = -ENOSYS;
-		goto out;
-	}
-
-	if (rate > c->max_rate)
-		rate = c->max_rate;
-
-	ret = c->ops->round_rate(c, rate);
-
-out:
-	spin_unlock_irqrestore(&c->spinlock, flags);
+	mutex_unlock(&clock_list_lock);
 	return ret;
 }
-EXPORT_SYMBOL(clk_round_rate);
 
 static int tegra_clk_init_one_from_table(struct tegra_clk_init_table *table)
 {
 	struct clk *c;
 	struct clk *p;
+	struct clk *parent;
 
 	int ret = 0;
 
 	c = tegra_get_clock_by_name(table->name);
 
 	if (!c) {
-		pr_warning("Unable to initialize clock %s\n",
+		pr_warn("Unable to initialize clock %s\n",
 			table->name);
 		return -ENODEV;
 	}
 
+	parent = clk_get_parent(c);
+
 	if (table->parent) {
 		p = tegra_get_clock_by_name(table->parent);
 		if (!p) {
-			pr_warning("Unable to find parent %s of clock %s\n",
+			pr_warn("Unable to find parent %s of clock %s\n",
 				table->parent, table->name);
 			return -ENODEV;
 		}
 
-		if (c->parent != p) {
+		if (parent != p) {
 			ret = clk_set_parent(c, p);
 			if (ret) {
-				pr_warning("Unable to set parent %s of clock %s: %d\n",
+				pr_warn("Unable to set parent %s of clock %s: %d\n",
 					table->parent, table->name, ret);
 				return -EINVAL;
 			}
@@ -360,16 +105,16 @@ static int tegra_clk_init_one_from_table(struct tegra_clk_init_table *table)
 	if (table->rate && table->rate != clk_get_rate(c)) {
 		ret = clk_set_rate(c, table->rate);
 		if (ret) {
-			pr_warning("Unable to set clock %s to rate %lu: %d\n",
+			pr_warn("Unable to set clock %s to rate %lu: %d\n",
 				table->name, table->rate, ret);
 			return -EINVAL;
 		}
 	}
 
 	if (table->enabled) {
-		ret = clk_enable(c);
+		ret = clk_prepare_enable(c);
 		if (ret) {
-			pr_warning("Unable to enable clock %s: %d\n",
+			pr_warn("Unable to enable clock %s: %d\n",
 				table->name, ret);
 			return -EINVAL;
 		}
@@ -383,19 +128,20 @@ void tegra_clk_init_from_table(struct tegra_clk_init_table *table)
 	for (; table->name; table++)
 		tegra_clk_init_one_from_table(table);
 }
-EXPORT_SYMBOL(tegra_clk_init_from_table);
 
 void tegra_periph_reset_deassert(struct clk *c)
 {
-	BUG_ON(!c->ops->reset);
-	c->ops->reset(c, false);
+	struct clk_tegra *clk = to_clk_tegra(__clk_get_hw(c));
+	BUG_ON(!clk->reset);
+	clk->reset(__clk_get_hw(c), false);
 }
 EXPORT_SYMBOL(tegra_periph_reset_deassert);
 
 void tegra_periph_reset_assert(struct clk *c)
 {
-	BUG_ON(!c->ops->reset);
-	c->ops->reset(c, true);
+	struct clk_tegra *clk = to_clk_tegra(__clk_get_hw(c));
+	BUG_ON(!clk->reset);
+	clk->reset(__clk_get_hw(c), true);
 }
 EXPORT_SYMBOL(tegra_periph_reset_assert);
 
@@ -405,268 +151,14 @@ EXPORT_SYMBOL(tegra_periph_reset_assert);
 int tegra_clk_cfg_ex(struct clk *c, enum tegra_clk_ex_param p, u32 setting)
 {
 	int ret = 0;
-	unsigned long flags;
+	struct clk_tegra *clk = to_clk_tegra(__clk_get_hw(c));
 
-	spin_lock_irqsave(&c->spinlock, flags);
-
-	if (!c->ops || !c->ops->clk_cfg_ex) {
+	if (!clk->clk_cfg_ex) {
 		ret = -ENOSYS;
 		goto out;
 	}
-	ret = c->ops->clk_cfg_ex(c, p, setting);
+	ret = clk->clk_cfg_ex(__clk_get_hw(c), p, setting);
 
 out:
-	spin_unlock_irqrestore(&c->spinlock, flags);
-
 	return ret;
 }
-
-#ifdef CONFIG_DEBUG_FS
-
-static int __clk_lock_all_spinlocks(void)
-{
-	struct clk *c;
-
-	list_for_each_entry(c, &clocks, node)
-		if (!spin_trylock(&c->spinlock))
-			goto unlock_spinlocks;
-
-	return 0;
-
-unlock_spinlocks:
-	list_for_each_entry_continue_reverse(c, &clocks, node)
-		spin_unlock(&c->spinlock);
-
-	return -EAGAIN;
-}
-
-static void __clk_unlock_all_spinlocks(void)
-{
-	struct clk *c;
-
-	list_for_each_entry_reverse(c, &clocks, node)
-		spin_unlock(&c->spinlock);
-}
-
-/*
- * This function retries until it can take all locks, and may take
- * an arbitrarily long time to complete.
- * Must be called with irqs enabled, returns with irqs disabled
- * Must be called with clock_list_lock held
- */
-static void clk_lock_all(void)
-{
-	int ret;
-retry:
-	local_irq_disable();
-
-	ret = __clk_lock_all_spinlocks();
-	if (ret)
-		goto failed_spinlocks;
-
-	/* All locks taken successfully, return */
-	return;
-
-failed_spinlocks:
-	local_irq_enable();
-	yield();
-	goto retry;
-}
-
-/*
- * Unlocks all clocks after a clk_lock_all
- * Must be called with irqs disabled, returns with irqs enabled
- * Must be called with clock_list_lock held
- */
-static void clk_unlock_all(void)
-{
-	__clk_unlock_all_spinlocks();
-
-	local_irq_enable();
-}
-
-static struct dentry *clk_debugfs_root;
-
-
-static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
-{
-	struct clk *child;
-	const char *state = "uninit";
-	char div[8] = {0};
-
-	if (c->state == ON)
-		state = "on";
-	else if (c->state == OFF)
-		state = "off";
-
-	if (c->mul != 0 && c->div != 0) {
-		if (c->mul > c->div) {
-			int mul = c->mul / c->div;
-			int mul2 = (c->mul * 10 / c->div) % 10;
-			int mul3 = (c->mul * 10) % c->div;
-			if (mul2 == 0 && mul3 == 0)
-				snprintf(div, sizeof(div), "x%d", mul);
-			else if (mul3 == 0)
-				snprintf(div, sizeof(div), "x%d.%d", mul, mul2);
-			else
-				snprintf(div, sizeof(div), "x%d.%d..", mul, mul2);
-		} else {
-			snprintf(div, sizeof(div), "%d%s", c->div / c->mul,
-				(c->div % c->mul) ? ".5" : "");
-		}
-	}
-
-	seq_printf(s, "%*s%c%c%-*s %-6s %-3d %-8s %-10lu\n",
-		level * 3 + 1, "",
-		c->rate > c->max_rate ? '!' : ' ',
-		!c->set ? '*' : ' ',
-		30 - level * 3, c->name,
-		state, c->refcnt, div, clk_get_rate_all_locked(c));
-
-	list_for_each_entry(child, &clocks, node) {
-		if (child->parent != c)
-			continue;
-
-		clock_tree_show_one(s, child, level + 1);
-	}
-}
-
-static int clock_tree_show(struct seq_file *s, void *data)
-{
-	struct clk *c;
-	seq_printf(s, "   clock                          state  ref div      rate\n");
-	seq_printf(s, "--------------------------------------------------------------\n");
-
-	mutex_lock(&clock_list_lock);
-
-	clk_lock_all();
-
-	list_for_each_entry(c, &clocks, node)
-		if (c->parent == NULL)
-			clock_tree_show_one(s, c, 0);
-
-	clk_unlock_all();
-
-	mutex_unlock(&clock_list_lock);
-	return 0;
-}
-
-static int clock_tree_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, clock_tree_show, inode->i_private);
-}
-
-static const struct file_operations clock_tree_fops = {
-	.open		= clock_tree_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static int possible_parents_show(struct seq_file *s, void *data)
-{
-	struct clk *c = s->private;
-	int i;
-
-	for (i = 0; c->inputs[i].input; i++) {
-		char *first = (i == 0) ? "" : " ";
-		seq_printf(s, "%s%s", first, c->inputs[i].input->name);
-	}
-	seq_printf(s, "\n");
-	return 0;
-}
-
-static int possible_parents_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, possible_parents_show, inode->i_private);
-}
-
-static const struct file_operations possible_parents_fops = {
-	.open		= possible_parents_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static int clk_debugfs_register_one(struct clk *c)
-{
-	struct dentry *d;
-
-	d = debugfs_create_dir(c->name, clk_debugfs_root);
-	if (!d)
-		return -ENOMEM;
-	c->dent = d;
-
-	d = debugfs_create_u8("refcnt", S_IRUGO, c->dent, (u8 *)&c->refcnt);
-	if (!d)
-		goto err_out;
-
-	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
-	if (!d)
-		goto err_out;
-
-	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
-	if (!d)
-		goto err_out;
-
-	if (c->inputs) {
-		d = debugfs_create_file("possible_parents", S_IRUGO, c->dent,
-			c, &possible_parents_fops);
-		if (!d)
-			goto err_out;
-	}
-
-	return 0;
-
-err_out:
-	debugfs_remove_recursive(c->dent);
-	return -ENOMEM;
-}
-
-static int clk_debugfs_register(struct clk *c)
-{
-	int err;
-	struct clk *pa = c->parent;
-
-	if (pa && !pa->dent) {
-		err = clk_debugfs_register(pa);
-		if (err)
-			return err;
-	}
-
-	if (!c->dent) {
-		err = clk_debugfs_register_one(c);
-		if (err)
-			return err;
-	}
-	return 0;
-}
-
-int __init tegra_clk_debugfs_init(void)
-{
-	struct clk *c;
-	struct dentry *d;
-	int err = -ENOMEM;
-
-	d = debugfs_create_dir("clock", NULL);
-	if (!d)
-		return -ENOMEM;
-	clk_debugfs_root = d;
-
-	d = debugfs_create_file("clock_tree", S_IRUGO, clk_debugfs_root, NULL,
-		&clock_tree_fops);
-	if (!d)
-		goto err_out;
-
-	list_for_each_entry(c, &clocks, node) {
-		err = clk_debugfs_register(c);
-		if (err)
-			goto err_out;
-	}
-	return 0;
-err_out:
-	debugfs_remove_recursive(clk_debugfs_root);
-	return err;
-}
-
-#endif
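
For context, below is a minimal sketch of how a board file is expected to drive the table-based init path kept by this commit. It assumes the struct tegra_clk_init_table layout (name, parent, rate, enabled) and the to_clk_tegra() helper from the Tegra clock headers of this era; the clock names, rates and the example_* identifiers are illustrative only and are not taken from the commit.

/*
 * Illustrative sketch (not part of the commit): feeding
 * tegra_clk_init_from_table() from board init code.
 */
#include <linux/clk.h>
#include <linux/init.h>
#include "clock.h"	/* struct tegra_clk_init_table, tegra_clk_init_from_table() */

static struct tegra_clk_init_table example_clk_init_table[] __initdata = {
	/* name		parent		rate		enabled */
	{ "uarta",	"pll_p",	216000000,	true },
	{ "sdmmc1",	"pll_p",	48000000,	false },
	{ NULL,		NULL,		0,		0 },	/* NULL name terminates the loop */
};

static void __init example_board_clocks_init(void)
{
	/*
	 * For each entry, tegra_clk_init_one_from_table() looks the clock up
	 * by name, reparents it if the requested parent differs, sets the
	 * rate, and (after this commit) enables it with clk_prepare_enable()
	 * instead of the bare clk_enable().
	 */
	tegra_clk_init_from_table(example_clk_init_table);
}

/*
 * The new code obtains its per-clock driver state with
 * to_clk_tegra(__clk_get_hw(c)); in the Tegra clock headers of this period
 * that is the usual container_of() pattern around an embedded clk_hw,
 * roughly:
 *
 *	struct clk_tegra {
 *		struct clk_hw		hw;
 *		struct list_head	node;
 *		...
 *	};
 *	#define to_clk_tegra(_hw) container_of(_hw, struct clk_tegra, hw)
 */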