Diffstat (limited to 'arch/arm/mach-tegra/clock.c')
-rw-r--r--  arch/arm/mach-tegra/clock.c | 532
1 file changed, 247 insertions(+), 285 deletions(-)
diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c
index 77948e0f4909..e028320ab423 100644
--- a/arch/arm/mach-tegra/clock.c
+++ b/arch/arm/mach-tegra/clock.c
@@ -18,238 +18,177 @@
 
 #include <linux/kernel.h>
 #include <linux/clk.h>
-#include <linux/list.h>
+#include <linux/clkdev.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/list.h>
 #include <linux/module.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
+#include <linux/sched.h>
 #include <linux/seq_file.h>
-#include <linux/regulator/consumer.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
+
+#include <mach/clk.h>
 
-#include "clock.h"
 #include "board.h"
-#include "fuse.h"
+#include "clock.h"
 
+/*
+ * Locking:
+ *
+ * Each struct clk has a spinlock.
+ *
+ * To avoid AB-BA locking problems, locks must always be traversed from child
+ * clock to parent clock.  For example, when enabling a clock, the clock's lock
+ * is taken, and then clk_enable is called on the parent, which takes the
+ * parent clock's lock.  There is one exception to this ordering: when dumping
+ * the clock tree through debugfs.  In this case, clk_lock_all is called,
+ * which attempts to iterate through the entire list of clocks and take every
+ * clock lock.  If any call to spin_trylock fails, all locked clocks are
+ * unlocked, and the process is retried.  When all the locks are held,
+ * the only clock operation that can be called is clk_get_rate_all_locked.
+ *
+ * Within a single clock, no clock operation can call another clock operation
+ * on itself, except for clk_get_rate_locked and clk_set_rate_locked.  Any
+ * clock operation can call any other clock operation on any of its possible
+ * parents.
+ *
+ * An additional mutex, clock_list_lock, is used to protect the list of all
+ * clocks.
+ *
+ * The clock operations must lock internally to protect against
+ * read-modify-write on registers that are shared by multiple clocks.
+ */
+static DEFINE_MUTEX(clock_list_lock);
 static LIST_HEAD(clocks);
 
-static DEFINE_SPINLOCK(clock_lock);
-static DEFINE_MUTEX(dvfs_lock);
-
-static int clk_is_dvfs(struct clk *c)
-{
-        return (c->dvfs != NULL);
-};
-
-static int dvfs_set_rate(struct dvfs *d, unsigned long rate)
-{
-        struct dvfs_table *t;
-
-        if (d->table == NULL)
-                return -ENODEV;
-
-        for (t = d->table; t->rate != 0; t++) {
-                if (rate <= t->rate) {
-                        if (!d->reg)
-                                return 0;
-
-                        return regulator_set_voltage(d->reg,
-                                t->millivolts * 1000,
-                                d->max_millivolts * 1000);
-                }
-        }
-
-        return -EINVAL;
-}
-
-static void dvfs_init(struct clk *c)
-{
-        int process_id;
-        int i;
-        struct dvfs_table *table;
-
-        process_id = c->dvfs->cpu ? tegra_core_process_id() :
-                tegra_cpu_process_id();
-
-        for (i = 0; i < c->dvfs->process_id_table_length; i++)
-                if (process_id == c->dvfs->process_id_table[i].process_id)
-                        c->dvfs->table = c->dvfs->process_id_table[i].table;
-
-        if (c->dvfs->table == NULL) {
-                pr_err("Failed to find dvfs table for clock %s process %d\n",
-                        c->name, process_id);
-                return;
-        }
-
-        c->dvfs->max_millivolts = 0;
-        for (table = c->dvfs->table; table->rate != 0; table++)
-                if (c->dvfs->max_millivolts < table->millivolts)
-                        c->dvfs->max_millivolts = table->millivolts;
-
-        c->dvfs->reg = regulator_get(NULL, c->dvfs->reg_id);
-
-        if (IS_ERR(c->dvfs->reg)) {
-                pr_err("Failed to get regulator %s for clock %s\n",
-                        c->dvfs->reg_id, c->name);
-                c->dvfs->reg = NULL;
-                return;
-        }
-
-        if (c->refcnt > 0)
-                dvfs_set_rate(c->dvfs, c->rate);
-}
-
 struct clk *tegra_get_clock_by_name(const char *name)
 {
         struct clk *c;
         struct clk *ret = NULL;
-        unsigned long flags;
-        spin_lock_irqsave(&clock_lock, flags);
+        mutex_lock(&clock_list_lock);
         list_for_each_entry(c, &clocks, node) {
                 if (strcmp(c->name, name) == 0) {
                         ret = c;
                         break;
                 }
         }
-        spin_unlock_irqrestore(&clock_lock, flags);
+        mutex_unlock(&clock_list_lock);
         return ret;
 }
 
-static void clk_recalculate_rate(struct clk *c)
+/* Must be called with c->spinlock held */
+static unsigned long clk_predict_rate_from_parent(struct clk *c, struct clk *p)
 {
         u64 rate;
 
-        if (!c->parent)
-                return;
-
-        rate = c->parent->rate;
+        rate = clk_get_rate(p);
 
         if (c->mul != 0 && c->div != 0) {
-                rate = rate * c->mul;
+                rate *= c->mul;
+                rate += c->div - 1; /* round up */
                 do_div(rate, c->div);
         }
 
-        if (rate > c->max_rate)
-                pr_warn("clocks: Set clock %s to rate %llu, max is %lu\n",
-                        c->name, rate, c->max_rate);
-
-        c->rate = rate;
+        return rate;
 }
 
-int clk_reparent(struct clk *c, struct clk *parent)
+/* Must be called with c->spinlock held */
+unsigned long clk_get_rate_locked(struct clk *c)
 {
-        pr_debug("%s: %s\n", __func__, c->name);
-        c->parent = parent;
-        list_del(&c->sibling);
-        list_add_tail(&c->sibling, &parent->children);
-        return 0;
-}
+        unsigned long rate;
 
-static void propagate_rate(struct clk *c)
-{
-        struct clk *clkp;
-        pr_debug("%s: %s\n", __func__, c->name);
-        list_for_each_entry(clkp, &c->children, sibling) {
-                pr_debug(" %s\n", clkp->name);
-                clk_recalculate_rate(clkp);
-                propagate_rate(clkp);
-        }
+        if (c->parent)
+                rate = clk_predict_rate_from_parent(c, c->parent);
+        else
+                rate = c->rate;
+
+        return rate;
 }
 
-void clk_init(struct clk *c)
+unsigned long clk_get_rate(struct clk *c)
 {
         unsigned long flags;
+        unsigned long rate;
+
+        spin_lock_irqsave(&c->spinlock, flags);
 
-        pr_debug("%s: %s\n", __func__, c->name);
+        rate = clk_get_rate_locked(c);
 
-        spin_lock_irqsave(&clock_lock, flags);
+        spin_unlock_irqrestore(&c->spinlock, flags);
 
-        INIT_LIST_HEAD(&c->children);
-        INIT_LIST_HEAD(&c->sibling);
+        return rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+int clk_reparent(struct clk *c, struct clk *parent)
+{
+        c->parent = parent;
+        return 0;
+}
+
+void clk_init(struct clk *c)
+{
+        spin_lock_init(&c->spinlock);
 
         if (c->ops && c->ops->init)
                 c->ops->init(c);
 
-        clk_recalculate_rate(c);
+        if (!c->ops || !c->ops->enable) {
+                c->refcnt++;
+                c->set = true;
+                if (c->parent)
+                        c->state = c->parent->state;
+                else
+                        c->state = ON;
+        }
 
+        mutex_lock(&clock_list_lock);
         list_add(&c->node, &clocks);
-
-        if (c->parent)
-                list_add_tail(&c->sibling, &c->parent->children);
-
-        spin_unlock_irqrestore(&clock_lock, flags);
+        mutex_unlock(&clock_list_lock);
 }
 
-int clk_enable_locked(struct clk *c)
+int clk_enable(struct clk *c)
 {
-        int ret;
-        pr_debug("%s: %s\n", __func__, c->name);
+        int ret = 0;
+        unsigned long flags;
+
+        spin_lock_irqsave(&c->spinlock, flags);
+
         if (c->refcnt == 0) {
                 if (c->parent) {
-                        ret = clk_enable_locked(c->parent);
+                        ret = clk_enable(c->parent);
                         if (ret)
-                                return ret;
+                                goto out;
                 }
 
                 if (c->ops && c->ops->enable) {
                         ret = c->ops->enable(c);
                         if (ret) {
                                 if (c->parent)
-                                        clk_disable_locked(c->parent);
-                                return ret;
+                                        clk_disable(c->parent);
+                                goto out;
                         }
                         c->state = ON;
-#ifdef CONFIG_DEBUG_FS
-                        c->set = 1;
-#endif
+                        c->set = true;
                 }
         }
         c->refcnt++;
-
-        return 0;
-}
-
-int clk_enable_cansleep(struct clk *c)
-{
-        int ret;
-        unsigned long flags;
-
-        mutex_lock(&dvfs_lock);
-
-        if (clk_is_dvfs(c) && c->refcnt > 0)
-                dvfs_set_rate(c->dvfs, c->rate);
-
-        spin_lock_irqsave(&clock_lock, flags);
-        ret = clk_enable_locked(c);
-        spin_unlock_irqrestore(&clock_lock, flags);
-
-        mutex_unlock(&dvfs_lock);
-
+out:
+        spin_unlock_irqrestore(&c->spinlock, flags);
         return ret;
 }
-EXPORT_SYMBOL(clk_enable_cansleep);
+EXPORT_SYMBOL(clk_enable);
 
-int clk_enable(struct clk *c)
+void clk_disable(struct clk *c)
 {
-        int ret;
         unsigned long flags;
 
-        if (clk_is_dvfs(c))
-                BUG();
-
-        spin_lock_irqsave(&clock_lock, flags);
-        ret = clk_enable_locked(c);
-        spin_unlock_irqrestore(&clock_lock, flags);
-
-        return ret;
-}
-EXPORT_SYMBOL(clk_enable);
+        spin_lock_irqsave(&c->spinlock, flags);
 
-void clk_disable_locked(struct clk *c)
-{
-        pr_debug("%s: %s\n", __func__, c->name);
         if (c->refcnt == 0) {
                 WARN(1, "Attempting to disable clock %s with refcnt 0", c->name);
+                spin_unlock_irqrestore(&c->spinlock, flags);
                 return;
         }
         if (c->refcnt == 1) {
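
The hunk above replaces the single global clock_lock with a per-clock spinlock taken child first, then parent: clk_enable holds c->spinlock across the recursive clk_enable(c->parent) call, exactly the ordering the new locking comment prescribes. A minimal userspace sketch of that discipline, with pthread mutexes standing in for kernel spinlocks (struct fields and names here are illustrative, not from the kernel):

#include <pthread.h>

struct clk {
        struct clk *parent;
        pthread_mutex_t lock;           /* stands in for c->spinlock */
        int refcnt;
};

/*
 * Locks are always acquired child first, then parent: the recursive
 * call runs while the child's lock is still held, so every thread
 * traverses the tree in the same direction and no AB-BA cycle can form.
 */
static int clk_enable_sketch(struct clk *c)
{
        int ret = 0;

        pthread_mutex_lock(&c->lock);
        if (c->refcnt == 0 && c->parent)
                ret = clk_enable_sketch(c->parent);
        if (ret == 0)
                c->refcnt++;
        pthread_mutex_unlock(&c->lock);
        return ret;
}
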
@@ -257,71 +196,39 @@ void clk_disable_locked(struct clk *c)
                         c->ops->disable(c);
 
                 if (c->parent)
-                        clk_disable_locked(c->parent);
+                        clk_disable(c->parent);
 
                 c->state = OFF;
         }
         c->refcnt--;
-}
-
-void clk_disable_cansleep(struct clk *c)
-{
-        unsigned long flags;
-
-        mutex_lock(&dvfs_lock);
-
-        spin_lock_irqsave(&clock_lock, flags);
-        clk_disable_locked(c);
-        spin_unlock_irqrestore(&clock_lock, flags);
 
-        if (clk_is_dvfs(c) && c->refcnt == 0)
-                dvfs_set_rate(c->dvfs, c->rate);
-
-        mutex_unlock(&dvfs_lock);
-}
-EXPORT_SYMBOL(clk_disable_cansleep);
-
-void clk_disable(struct clk *c)
-{
-        unsigned long flags;
-
-        if (clk_is_dvfs(c))
-                BUG();
-
-        spin_lock_irqsave(&clock_lock, flags);
-        clk_disable_locked(c);
-        spin_unlock_irqrestore(&clock_lock, flags);
+        spin_unlock_irqrestore(&c->spinlock, flags);
 }
 EXPORT_SYMBOL(clk_disable);
 
-int clk_set_parent_locked(struct clk *c, struct clk *parent)
+int clk_set_parent(struct clk *c, struct clk *parent)
 {
         int ret;
+        unsigned long flags;
+        unsigned long new_rate;
+        unsigned long old_rate;
 
-        pr_debug("%s: %s\n", __func__, c->name);
+        spin_lock_irqsave(&c->spinlock, flags);
 
-        if (!c->ops || !c->ops->set_parent)
-                return -ENOSYS;
+        if (!c->ops || !c->ops->set_parent) {
+                ret = -ENOSYS;
+                goto out;
+        }
 
-        ret = c->ops->set_parent(c, parent);
+        new_rate = clk_predict_rate_from_parent(c, parent);
+        old_rate = clk_get_rate_locked(c);
 
+        ret = c->ops->set_parent(c, parent);
         if (ret)
-                return ret;
-
-        clk_recalculate_rate(c);
-
-        propagate_rate(c);
-
-        return 0;
-}
+                goto out;
 
-int clk_set_parent(struct clk *c, struct clk *parent)
-{
-        int ret;
-        unsigned long flags;
-        spin_lock_irqsave(&clock_lock, flags);
-        ret = clk_set_parent_locked(c, parent);
-        spin_unlock_irqrestore(&clock_lock, flags);
+out:
+        spin_unlock_irqrestore(&c->spinlock, flags);
         return ret;
 }
 EXPORT_SYMBOL(clk_set_parent);
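
clk_set_parent now computes the would-be rate under the new parent with clk_predict_rate_from_parent before switching; that helper (first hunk) scales the parent rate by mul/div and rounds the division up via rate += c->div - 1. A worked, standalone rendering of that arithmetic, with made-up numbers for illustration:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as clk_predict_rate_from_parent: scale the parent
 * rate by mul/div, rounding the division up rather than down. */
static uint64_t predict(uint64_t parent_rate, unsigned mul, unsigned div)
{
        uint64_t rate = parent_rate * mul;
        rate += div - 1;        /* round up, as in the patch */
        return rate / div;      /* do_div() in the kernel */
}

int main(void)
{
        /* 216 MHz parent with divider 7: plain division truncates to
         * 30857142 Hz, rounding up yields 30857143 Hz. */
        printf("%llu\n", (unsigned long long)predict(216000000, 1, 7));
        return 0;
}
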
@@ -334,100 +241,86 @@ EXPORT_SYMBOL(clk_get_parent);
 
 int clk_set_rate_locked(struct clk *c, unsigned long rate)
 {
-        int ret;
-
-        if (rate > c->max_rate)
-                rate = c->max_rate;
+        long new_rate;
 
         if (!c->ops || !c->ops->set_rate)
                 return -ENOSYS;
 
-        ret = c->ops->set_rate(c, rate);
-
-        if (ret)
-                return ret;
-
-        clk_recalculate_rate(c);
-
-        propagate_rate(c);
-
-        return 0;
-}
-
-int clk_set_rate_cansleep(struct clk *c, unsigned long rate)
-{
-        int ret = 0;
-        unsigned long flags;
-
-        pr_debug("%s: %s\n", __func__, c->name);
-
-        mutex_lock(&dvfs_lock);
-
-        if (rate > c->rate)
-                ret = dvfs_set_rate(c->dvfs, rate);
-        if (ret)
-                goto out;
+        if (rate > c->max_rate)
+                rate = c->max_rate;
 
-        spin_lock_irqsave(&clock_lock, flags);
-        ret = clk_set_rate_locked(c, rate);
-        spin_unlock_irqrestore(&clock_lock, flags);
+        if (c->ops && c->ops->round_rate) {
+                new_rate = c->ops->round_rate(c, rate);
 
-        if (ret)
-                goto out;
+                if (new_rate < 0)
+                        return new_rate;
 
-        ret = dvfs_set_rate(c->dvfs, rate);
+                rate = new_rate;
+        }
 
-out:
-        mutex_unlock(&dvfs_lock);
-        return ret;
+        return c->ops->set_rate(c, rate);
 }
-EXPORT_SYMBOL(clk_set_rate_cansleep);
 
 int clk_set_rate(struct clk *c, unsigned long rate)
 {
-        int ret = 0;
+        int ret;
         unsigned long flags;
 
-        pr_debug("%s: %s\n", __func__, c->name);
-
-        if (clk_is_dvfs(c))
-                BUG();
+        spin_lock_irqsave(&c->spinlock, flags);
 
-        spin_lock_irqsave(&clock_lock, flags);
         ret = clk_set_rate_locked(c, rate);
-        spin_unlock_irqrestore(&clock_lock, flags);
+
+        spin_unlock_irqrestore(&c->spinlock, flags);
 
         return ret;
 }
 EXPORT_SYMBOL(clk_set_rate);
 
-unsigned long clk_get_rate(struct clk *c)
-{
-        unsigned long flags;
-        unsigned long ret;
-
-        spin_lock_irqsave(&clock_lock, flags);
 
-        pr_debug("%s: %s\n", __func__, c->name);
+/* Must be called with clock_list_lock and all individual clock locks held */
+unsigned long clk_get_rate_all_locked(struct clk *c)
+{
+        u64 rate;
+        int mul = 1;
+        int div = 1;
+        struct clk *p = c;
+
+        while (p) {
+                c = p;
+                if (c->mul != 0 && c->div != 0) {
+                        mul *= c->mul;
+                        div *= c->div;
+                }
+                p = c->parent;
+        }
 
-        ret = c->rate;
+        rate = c->rate;
+        rate *= mul;
+        do_div(rate, div);
 
-        spin_unlock_irqrestore(&clock_lock, flags);
-        return ret;
+        return rate;
 }
-EXPORT_SYMBOL(clk_get_rate);
 
 long clk_round_rate(struct clk *c, unsigned long rate)
 {
-        pr_debug("%s: %s\n", __func__, c->name);
+        unsigned long flags;
+        long ret;
 
-        if (!c->ops || !c->ops->round_rate)
-                return -ENOSYS;
+        spin_lock_irqsave(&c->spinlock, flags);
+
+        if (!c->ops || !c->ops->round_rate) {
+                ret = -ENOSYS;
+                goto out;
+        }
 
         if (rate > c->max_rate)
                 rate = c->max_rate;
 
-        return c->ops->round_rate(c, rate);
+        ret = c->ops->round_rate(c, rate);
+
+out:
+        spin_unlock_irqrestore(&c->spinlock, flags);
+        return ret;
 }
 EXPORT_SYMBOL(clk_round_rate);
 
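
clk_get_rate_all_locked, used while every clock lock is held, avoids re-taking any lock by walking from the clock to the root and folding the per-level multipliers and dividers into one ratio: rate = root_rate * (mul_1 * ... * mul_n) / (div_1 * ... * div_n). A self-contained sketch of the same walk (division truncates, as do_div does; the struct is trimmed to just the fields this function touches):

#include <stdint.h>

struct clk {
        struct clk *parent;
        unsigned long rate;     /* only meaningful at the root */
        unsigned mul, div;
};

/* Mirror of clk_get_rate_all_locked: climb to the root, accumulating
 * mul/div along the way, then apply the combined ratio once. */
static uint64_t rate_all(struct clk *c)
{
        uint64_t mul = 1, div = 1;
        struct clk *p = c;

        while (p) {
                c = p;
                if (c->mul && c->div) {
                        mul *= c->mul;
                        div *= c->div;
                }
                p = c->parent;
        }
        return c->rate * mul / div;
}
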
@@ -509,31 +402,90 @@ void __init tegra_init_clock(void)
         tegra2_init_clocks();
 }
 
-int __init tegra_init_dvfs(void)
+/*
+ * The SDMMC controllers have extra bits in the clock source register that
+ * adjust the delay between the clock and data to compensate for delays
+ * on the PCB.
+ */
+void tegra_sdmmc_tap_delay(struct clk *c, int delay)
 {
-        struct clk *c, *safe;
+        unsigned long flags;
+
+        spin_lock_irqsave(&c->spinlock, flags);
+        tegra2_sdmmc_tap_delay(c, delay);
+        spin_unlock_irqrestore(&c->spinlock, flags);
+}
 
-        mutex_lock(&dvfs_lock);
+#ifdef CONFIG_DEBUG_FS
 
-        list_for_each_entry_safe(c, safe, &clocks, node)
-                if (c->dvfs)
-                        dvfs_init(c);
+static int __clk_lock_all_spinlocks(void)
+{
+        struct clk *c;
 
-        mutex_unlock(&dvfs_lock);
+        list_for_each_entry(c, &clocks, node)
+                if (!spin_trylock(&c->spinlock))
+                        goto unlock_spinlocks;
 
         return 0;
+
+unlock_spinlocks:
+        list_for_each_entry_continue_reverse(c, &clocks, node)
+                spin_unlock(&c->spinlock);
+
+        return -EAGAIN;
 }
 
-late_initcall(tegra_init_dvfs);
+static void __clk_unlock_all_spinlocks(void)
+{
+        struct clk *c;
+
+        list_for_each_entry_reverse(c, &clocks, node)
+                spin_unlock(&c->spinlock);
+}
+
+/*
+ * This function retries until it can take all locks, and may take
+ * an arbitrarily long time to complete.
+ * Must be called with irqs enabled, returns with irqs disabled
+ * Must be called with clock_list_lock held
+ */
+static void clk_lock_all(void)
+{
+        int ret;
+retry:
+        local_irq_disable();
+
+        ret = __clk_lock_all_spinlocks();
+        if (ret)
+                goto failed_spinlocks;
+
+        /* All locks taken successfully, return */
+        return;
+
+failed_spinlocks:
+        local_irq_enable();
+        yield();
+        goto retry;
+}
+
+/*
+ * Unlocks all clocks after a clk_lock_all
+ * Must be called with irqs disabled, returns with irqs enabled
+ * Must be called with clock_list_lock held
+ */
+static void clk_unlock_all(void)
+{
+        __clk_unlock_all_spinlocks();
+
+        local_irq_enable();
+}
 
-#ifdef CONFIG_DEBUG_FS
 static struct dentry *clk_debugfs_root;
 
 
 static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
 {
         struct clk *child;
-        struct clk *safe;
         const char *state = "uninit";
         char div[8] = {0};
 
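
clk_lock_all above is the documented exception to the child-before-parent ordering: it only ever trylocks, and on any failure it releases everything taken so far and starts over, so it can stall under contention but never deadlock. The same pattern in a compilable userspace sketch (pthread mutexes in place of spinlocks, a plain array in place of the clocks list; all names are illustrative):

#include <pthread.h>
#include <sched.h>

#define NCLOCKS 8
/* Assume each element is set up with pthread_mutex_init() at startup. */
static pthread_mutex_t locks[NCLOCKS];

/* Try to take every lock; on the first failure release everything
 * taken so far, in reverse, and report failure so the caller retries. */
static int lock_all_try(void)
{
        int i;

        for (i = 0; i < NCLOCKS; i++) {
                if (pthread_mutex_trylock(&locks[i]) != 0) {
                        while (--i >= 0)
                                pthread_mutex_unlock(&locks[i]);
                        return -1;
                }
        }
        return 0;
}

/* Equivalent of clk_lock_all: loop until a full sweep succeeds,
 * yielding between attempts instead of busy-waiting. */
static void lock_all(void)
{
        while (lock_all_try() != 0)
                sched_yield();  /* yield() in the kernel */
}
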
@@ -564,8 +516,12 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
                 c->rate > c->max_rate ? '!' : ' ',
                 !c->set ? '*' : ' ',
                 30 - level * 3, c->name,
-                state, c->refcnt, div, c->rate);
-        list_for_each_entry_safe(child, safe, &c->children, sibling) {
+                state, c->refcnt, div, clk_get_rate_all_locked(c));
+
+        list_for_each_entry(child, &clocks, node) {
+                if (child->parent != c)
+                        continue;
+
                 clock_tree_show_one(s, child, level + 1);
         }
 }
@@ -573,14 +529,20 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
 static int clock_tree_show(struct seq_file *s, void *data)
 {
         struct clk *c;
-        unsigned long flags;
         seq_printf(s, "   clock                          state  ref div      rate\n");
         seq_printf(s, "--------------------------------------------------------------\n");
-        spin_lock_irqsave(&clock_lock, flags);
+
+        mutex_lock(&clock_list_lock);
+
+        clk_lock_all();
+
         list_for_each_entry(c, &clocks, node)
                 if (c->parent == NULL)
                         clock_tree_show_one(s, c, 0);
-        spin_unlock_irqrestore(&clock_lock, flags);
+
+        clk_unlock_all();
+
+        mutex_unlock(&clock_list_lock);
         return 0;
 }
 
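
With the children/sibling lists removed from struct clk, the debugfs dump above reconstructs the hierarchy by rescanning the flat clock list at each level and matching on the parent pointer, an O(n) pass per node that is acceptable for a debug-only path. A runnable miniature of that traversal (clock names and parentage invented for the example):

#include <stdio.h>

#define NCLOCKS 5

/* Flat table standing in for the kernel's clocks list; parent is an
 * index into the same table, -1 for a root clock. */
static const struct { const char *name; int parent; } tbl[NCLOCKS] = {
        { "clk_m", -1 }, { "pll_p", 0 }, { "pll_p_out1", 1 },
        { "uarta", 2 }, { "pll_c", 0 },
};

/* Same shape as the new clock_tree_show_one: a node's children are
 * whichever entries in the whole table name it as parent. */
static void show(int idx, int level)
{
        int i;

        printf("%*s%s\n", level * 3, "", tbl[idx].name);
        for (i = 0; i < NCLOCKS; i++)
                if (tbl[i].parent == idx)
                        show(i, level + 1);
}

int main(void)
{
        int i;

        for (i = 0; i < NCLOCKS; i++)
                if (tbl[i].parent == -1)
                        show(i, 0);
        return 0;
}
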