Diffstat (limited to 'arch/arm/mach-tegra/clock.c')
-rw-r--r--   arch/arm/mach-tegra/clock.c | 351
1 file changed, 226 insertions, 125 deletions
diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c
index 8fd96bfb0cde..aff4c5b8c378 100644
--- a/arch/arm/mach-tegra/clock.c
+++ b/arch/arm/mach-tegra/clock.c
@@ -18,83 +18,117 @@
18 | 18 | ||
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/clk.h> | 20 | #include <linux/clk.h> |
21 | #include <linux/list.h> | 21 | #include <linux/clkdev.h> |
22 | #include <linux/debugfs.h> | ||
23 | #include <linux/delay.h> | ||
22 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/list.h> | ||
23 | #include <linux/module.h> | 26 | #include <linux/module.h> |
24 | #include <linux/debugfs.h> | 27 | #include <linux/sched.h> |
25 | #include <linux/slab.h> | ||
26 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
27 | #include <linux/clkdev.h> | 29 | #include <linux/slab.h> |
30 | |||
31 | #include <mach/clk.h> | ||
28 | 32 | ||
29 | #include "board.h" | 33 | #include "board.h" |
30 | #include "clock.h" | 34 | #include "clock.h" |
31 | 35 | ||
36 | /* | ||
37 | * Locking: | ||
38 | * | ||
39 | * Each struct clk has a spinlock. | ||
40 | * | ||
41 | * To avoid AB-BA locking problems, locks must always be traversed from child | ||
42 | * clock to parent clock. For example, when enabling a clock, the clock's lock | ||
43 | * is taken, and then clk_enable is called on the parent, which takes the | ||
44 | * parent clock's lock. There is one exception to this ordering: when dumping | ||
45 | * the clock tree through debugfs. In this case, clk_lock_all is called, | ||
46 | * which attempts to iterate through the entire list of clocks and take every | ||
47 | * clock lock. If any call to spin_trylock fails, all locked clocks are | ||
48 | * unlocked, and the process is retried. When all the locks are held, | ||
49 | * the only clock operation that can be called is clk_get_rate_all_locked. | ||
50 | * | ||
51 | * Within a single clock, no clock operation can call another clock operation | ||
52 | * on itself, except for clk_get_rate_locked and clk_set_rate_locked. Any | ||
53 | * clock operation can call any other clock operation on any of its possible | ||
54 | * parents. | ||
55 | * | ||
56 | * An additional mutex, clock_list_lock, is used to protect the list of all | ||
57 | * clocks. | ||
58 | * | ||
59 | * The clock operations must lock internally to protect against | ||
60 | * read-modify-write on registers that are shared by multiple clocks | ||
61 | */ | ||
62 | static DEFINE_MUTEX(clock_list_lock); | ||
32 | static LIST_HEAD(clocks); | 63 | static LIST_HEAD(clocks); |
33 | 64 | ||
34 | static DEFINE_SPINLOCK(clock_lock); | ||
35 | struct clk *tegra_get_clock_by_name(const char *name) | 65 | struct clk *tegra_get_clock_by_name(const char *name) |
36 | { | 66 | { |
37 | struct clk *c; | 67 | struct clk *c; |
38 | struct clk *ret = NULL; | 68 | struct clk *ret = NULL; |
39 | unsigned long flags; | 69 | mutex_lock(&clock_list_lock); |
40 | spin_lock_irqsave(&clock_lock, flags); | ||
41 | list_for_each_entry(c, &clocks, node) { | 70 | list_for_each_entry(c, &clocks, node) { |
42 | if (strcmp(c->name, name) == 0) { | 71 | if (strcmp(c->name, name) == 0) { |
43 | ret = c; | 72 | ret = c; |
44 | break; | 73 | break; |
45 | } | 74 | } |
46 | } | 75 | } |
47 | spin_unlock_irqrestore(&clock_lock, flags); | 76 | mutex_unlock(&clock_list_lock); |
48 | return ret; | 77 | return ret; |
49 | } | 78 | } |
50 | 79 | ||
51 | static void clk_recalculate_rate(struct clk *c) | 80 | /* Must be called with c->spinlock held */ |
81 | static unsigned long clk_predict_rate_from_parent(struct clk *c, struct clk *p) | ||
52 | { | 82 | { |
53 | u64 rate; | 83 | u64 rate; |
54 | 84 | ||
55 | if (!c->parent) | 85 | rate = clk_get_rate(p); |
56 | return; | ||
57 | |||
58 | rate = c->parent->rate; | ||
59 | 86 | ||
60 | if (c->mul != 0 && c->div != 0) { | 87 | if (c->mul != 0 && c->div != 0) { |
61 | rate = rate * c->mul; | 88 | rate *= c->mul; |
62 | do_div(rate, c->div); | 89 | do_div(rate, c->div); |
63 | } | 90 | } |
64 | 91 | ||
65 | if (rate > c->max_rate) | 92 | return rate; |
66 | pr_warn("clocks: Set clock %s to rate %llu, max is %lu\n", | ||
67 | c->name, rate, c->max_rate); | ||
68 | |||
69 | c->rate = rate; | ||
70 | } | 93 | } |
71 | 94 | ||
72 | int clk_reparent(struct clk *c, struct clk *parent) | 95 | /* Must be called with c->spinlock held */ |
96 | unsigned long clk_get_rate_locked(struct clk *c) | ||
73 | { | 97 | { |
74 | c->parent = parent; | 98 | unsigned long rate; |
75 | list_del(&c->sibling); | ||
76 | list_add_tail(&c->sibling, &parent->children); | ||
77 | return 0; | ||
78 | } | ||
79 | 99 | ||
80 | static void propagate_rate(struct clk *c) | 100 | if (c->parent) |
81 | { | 101 | rate = clk_predict_rate_from_parent(c, c->parent); |
82 | struct clk *clkp; | 102 | else |
103 | rate = c->rate; | ||
83 | 104 | ||
84 | list_for_each_entry(clkp, &c->children, sibling) { | 105 | return rate; |
85 | clk_recalculate_rate(clkp); | ||
86 | propagate_rate(clkp); | ||
87 | } | ||
88 | } | 106 | } |
89 | 107 | ||
90 | void clk_init(struct clk *c) | 108 | unsigned long clk_get_rate(struct clk *c) |
91 | { | 109 | { |
92 | unsigned long flags; | 110 | unsigned long flags; |
111 | unsigned long rate; | ||
93 | 112 | ||
94 | spin_lock_irqsave(&clock_lock, flags); | 113 | spin_lock_irqsave(&c->spinlock, flags); |
95 | 114 | ||
96 | INIT_LIST_HEAD(&c->children); | 115 | rate = clk_get_rate_locked(c); |
97 | INIT_LIST_HEAD(&c->sibling); | 116 | |
117 | spin_unlock_irqrestore(&c->spinlock, flags); | ||
118 | |||
119 | return rate; | ||
120 | } | ||
121 | EXPORT_SYMBOL(clk_get_rate); | ||
122 | |||
123 | int clk_reparent(struct clk *c, struct clk *parent) | ||
124 | { | ||
125 | c->parent = parent; | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | void clk_init(struct clk *c) | ||
130 | { | ||
131 | spin_lock_init(&c->spinlock); | ||
98 | 132 | ||
99 | if (c->ops && c->ops->init) | 133 | if (c->ops && c->ops->init) |
100 | c->ops->init(c); | 134 | c->ops->init(c); |
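The locking rules in the new header comment are easiest to see in a small model. Below is a plain userspace sketch, not kernel code: pthread mutexes stand in for the per-clock spinlocks, and the invented model_clk_enable() takes the child's lock before recursing into the parent, so acquisition order always follows the clock tree as the comment requires.

/*
 * Illustrative userspace model (not kernel code) of the locking rule in the
 * header comment above: every clock carries its own lock, and locks are
 * always taken child first, then parent, so the acquisition order follows
 * the clock tree and AB-BA deadlocks cannot occur.  struct model_clk and
 * model_clk_enable() are invented names for this sketch.
 */
#include <pthread.h>
#include <stdio.h>

struct model_clk {
        const char *name;
        struct model_clk *parent;
        int refcnt;
        pthread_mutex_t lock;
};

static int model_clk_enable(struct model_clk *c)
{
        int ret = 0;

        pthread_mutex_lock(&c->lock);              /* child's lock first... */
        if (c->refcnt == 0 && c->parent)
                ret = model_clk_enable(c->parent); /* ...parent's lock taken inside its own call */
        if (!ret)
                c->refcnt++;
        pthread_mutex_unlock(&c->lock);
        return ret;
}

int main(void)
{
        struct model_clk pll = {
                .name = "pll", .lock = PTHREAD_MUTEX_INITIALIZER,
        };
        struct model_clk uart = {
                .name = "uart", .parent = &pll, .lock = PTHREAD_MUTEX_INITIALIZER,
        };

        model_clk_enable(&uart);
        printf("%s refcnt=%d, %s refcnt=%d\n",
               uart.name, uart.refcnt, pll.name, pll.refcnt);
        return 0;
}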
@@ -108,33 +142,31 @@ void clk_init(struct clk *c)
108 | c->state = ON; | 142 | c->state = ON; |
109 | } | 143 | } |
110 | 144 | ||
111 | clk_recalculate_rate(c); | 145 | mutex_lock(&clock_list_lock); |
112 | |||
113 | list_add(&c->node, &clocks); | 146 | list_add(&c->node, &clocks); |
114 | 147 | mutex_unlock(&clock_list_lock); | |
115 | if (c->parent) | ||
116 | list_add_tail(&c->sibling, &c->parent->children); | ||
117 | |||
118 | spin_unlock_irqrestore(&clock_lock, flags); | ||
119 | } | 148 | } |
120 | 149 | ||
121 | int clk_enable_locked(struct clk *c) | 150 | int clk_enable(struct clk *c) |
122 | { | 151 | { |
123 | int ret; | 152 | int ret = 0; |
153 | unsigned long flags; | ||
154 | |||
155 | spin_lock_irqsave(&c->spinlock, flags); | ||
124 | 156 | ||
125 | if (c->refcnt == 0) { | 157 | if (c->refcnt == 0) { |
126 | if (c->parent) { | 158 | if (c->parent) { |
127 | ret = clk_enable_locked(c->parent); | 159 | ret = clk_enable(c->parent); |
128 | if (ret) | 160 | if (ret) |
129 | return ret; | 161 | goto out; |
130 | } | 162 | } |
131 | 163 | ||
132 | if (c->ops && c->ops->enable) { | 164 | if (c->ops && c->ops->enable) { |
133 | ret = c->ops->enable(c); | 165 | ret = c->ops->enable(c); |
134 | if (ret) { | 166 | if (ret) { |
135 | if (c->parent) | 167 | if (c->parent) |
136 | clk_disable_locked(c->parent); | 168 | clk_disable(c->parent); |
137 | return ret; | 169 | goto out; |
138 | } | 170 | } |
139 | c->state = ON; | 171 | c->state = ON; |
140 | #ifdef CONFIG_DEBUG_FS | 172 | #ifdef CONFIG_DEBUG_FS |
@@ -143,27 +175,21 @@ int clk_enable_locked(struct clk *c)
143 | } | 175 | } |
144 | } | 176 | } |
145 | c->refcnt++; | 177 | c->refcnt++; |
146 | 178 | out: | |
147 | return 0; | 179 | spin_unlock_irqrestore(&c->spinlock, flags); |
180 | return ret; | ||
148 | } | 181 | } |
182 | EXPORT_SYMBOL(clk_enable); | ||
149 | 183 | ||
150 | int clk_enable(struct clk *c) | 184 | void clk_disable(struct clk *c) |
151 | { | 185 | { |
152 | int ret; | ||
153 | unsigned long flags; | 186 | unsigned long flags; |
154 | 187 | ||
155 | spin_lock_irqsave(&clock_lock, flags); | 188 | spin_lock_irqsave(&c->spinlock, flags); |
156 | ret = clk_enable_locked(c); | ||
157 | spin_unlock_irqrestore(&clock_lock, flags); | ||
158 | 189 | ||
159 | return ret; | ||
160 | } | ||
161 | EXPORT_SYMBOL(clk_enable); | ||
162 | |||
163 | void clk_disable_locked(struct clk *c) | ||
164 | { | ||
165 | if (c->refcnt == 0) { | 190 | if (c->refcnt == 0) { |
166 | WARN(1, "Attempting to disable clock %s with refcnt 0", c->name); | 191 | WARN(1, "Attempting to disable clock %s with refcnt 0", c->name); |
192 | spin_unlock_irqrestore(&c->spinlock, flags); | ||
167 | return; | 193 | return; |
168 | } | 194 | } |
169 | if (c->refcnt == 1) { | 195 | if (c->refcnt == 1) { |
@@ -171,49 +197,39 @@ void clk_disable_locked(struct clk *c)
171 | c->ops->disable(c); | 197 | c->ops->disable(c); |
172 | 198 | ||
173 | if (c->parent) | 199 | if (c->parent) |
174 | clk_disable_locked(c->parent); | 200 | clk_disable(c->parent); |
175 | 201 | ||
176 | c->state = OFF; | 202 | c->state = OFF; |
177 | } | 203 | } |
178 | c->refcnt--; | 204 | c->refcnt--; |
179 | } | ||
180 | 205 | ||
181 | void clk_disable(struct clk *c) | 206 | spin_unlock_irqrestore(&c->spinlock, flags); |
182 | { | ||
183 | unsigned long flags; | ||
184 | |||
185 | spin_lock_irqsave(&clock_lock, flags); | ||
186 | clk_disable_locked(c); | ||
187 | spin_unlock_irqrestore(&clock_lock, flags); | ||
188 | } | 207 | } |
189 | EXPORT_SYMBOL(clk_disable); | 208 | EXPORT_SYMBOL(clk_disable); |
190 | 209 | ||
191 | int clk_set_parent_locked(struct clk *c, struct clk *parent) | 210 | int clk_set_parent(struct clk *c, struct clk *parent) |
192 | { | 211 | { |
193 | int ret; | 212 | int ret; |
213 | unsigned long flags; | ||
214 | unsigned long new_rate; | ||
215 | unsigned long old_rate; | ||
194 | 216 | ||
195 | if (!c->ops || !c->ops->set_parent) | 217 | spin_lock_irqsave(&c->spinlock, flags); |
196 | return -ENOSYS; | ||
197 | 218 | ||
198 | ret = c->ops->set_parent(c, parent); | 219 | if (!c->ops || !c->ops->set_parent) { |
199 | 220 | ret = -ENOSYS; | |
200 | if (ret) | 221 | goto out; |
201 | return ret; | 222 | } |
202 | |||
203 | clk_recalculate_rate(c); | ||
204 | 223 | ||
205 | propagate_rate(c); | 224 | new_rate = clk_predict_rate_from_parent(c, parent); |
225 | old_rate = clk_get_rate_locked(c); | ||
206 | 226 | ||
207 | return 0; | 227 | ret = c->ops->set_parent(c, parent); |
208 | } | 228 | if (ret) |
229 | goto out; | ||
209 | 230 | ||
210 | int clk_set_parent(struct clk *c, struct clk *parent) | 231 | out: |
211 | { | 232 | spin_unlock_irqrestore(&c->spinlock, flags); |
212 | int ret; | ||
213 | unsigned long flags; | ||
214 | spin_lock_irqsave(&clock_lock, flags); | ||
215 | ret = clk_set_parent_locked(c, parent); | ||
216 | spin_unlock_irqrestore(&clock_lock, flags); | ||
217 | return ret; | 233 | return ret; |
218 | } | 234 | } |
219 | EXPORT_SYMBOL(clk_set_parent); | 235 | EXPORT_SYMBOL(clk_set_parent); |
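Seen from a driver, the switch to per-clock locks does not change the contract of clk_enable()/clk_disable(): calls must stay balanced, and the 0<->1 refcount transitions walk the parent chain. The snippet below is a schematic usage sketch rather than code from this patch; the "uart" connection id and the example_* helpers are made up.

/*
 * Schematic consumer-side usage, not code from this patch: the "uart"
 * connection id and the example_* functions are invented.  The refcounting
 * above means enable/disable calls must stay balanced; the parent chain is
 * enabled on the 0->1 transition and released again on the 1->0 transition.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *uart_clk;

static int example_uart_setup(struct device *dev)
{
        int ret;

        uart_clk = clk_get(dev, "uart");        /* resolved through clkdev */
        if (IS_ERR(uart_clk))
                return PTR_ERR(uart_clk);

        ret = clk_enable(uart_clk);             /* refcnt 0 -> 1: parents enabled too */
        if (ret) {
                clk_put(uart_clk);
                return ret;
        }
        return 0;
}

static void example_uart_teardown(void)
{
        clk_disable(uart_clk);                  /* refcnt 1 -> 0: parents released */
        clk_put(uart_clk);
}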
@@ -226,62 +242,75 @@ EXPORT_SYMBOL(clk_get_parent);
226 | 242 | ||
227 | int clk_set_rate_locked(struct clk *c, unsigned long rate) | 243 | int clk_set_rate_locked(struct clk *c, unsigned long rate) |
228 | { | 244 | { |
229 | int ret; | ||
230 | |||
231 | if (rate > c->max_rate) | ||
232 | rate = c->max_rate; | ||
233 | |||
234 | if (!c->ops || !c->ops->set_rate) | 245 | if (!c->ops || !c->ops->set_rate) |
235 | return -ENOSYS; | 246 | return -ENOSYS; |
236 | 247 | ||
237 | ret = c->ops->set_rate(c, rate); | 248 | if (rate > c->max_rate) |
238 | 249 | rate = c->max_rate; | |
239 | if (ret) | ||
240 | return ret; | ||
241 | |||
242 | clk_recalculate_rate(c); | ||
243 | |||
244 | propagate_rate(c); | ||
245 | 250 | ||
246 | return 0; | 251 | return c->ops->set_rate(c, rate); |
247 | } | 252 | } |
248 | 253 | ||
249 | int clk_set_rate(struct clk *c, unsigned long rate) | 254 | int clk_set_rate(struct clk *c, unsigned long rate) |
250 | { | 255 | { |
251 | int ret = 0; | 256 | int ret; |
252 | unsigned long flags; | 257 | unsigned long flags; |
253 | 258 | ||
254 | spin_lock_irqsave(&clock_lock, flags); | 259 | spin_lock_irqsave(&c->spinlock, flags); |
260 | |||
255 | ret = clk_set_rate_locked(c, rate); | 261 | ret = clk_set_rate_locked(c, rate); |
256 | spin_unlock_irqrestore(&clock_lock, flags); | 262 | |
263 | spin_unlock_irqrestore(&c->spinlock, flags); | ||
257 | 264 | ||
258 | return ret; | 265 | return ret; |
259 | } | 266 | } |
260 | EXPORT_SYMBOL(clk_set_rate); | 267 | EXPORT_SYMBOL(clk_set_rate); |
261 | 268 | ||
262 | unsigned long clk_get_rate(struct clk *c) | ||
263 | { | ||
264 | unsigned long flags; | ||
265 | unsigned long ret; | ||
266 | 269 | ||
267 | spin_lock_irqsave(&clock_lock, flags); | 270 | /* Must be called with the clock list lock and all individual clock locks held */ |
271 | unsigned long clk_get_rate_all_locked(struct clk *c) | ||
272 | { | ||
273 | u64 rate; | ||
274 | int mul = 1; | ||
275 | int div = 1; | ||
276 | struct clk *p = c; | ||
277 | |||
278 | while (p) { | ||
279 | c = p; | ||
280 | if (c->mul != 0 && c->div != 0) { | ||
281 | mul *= c->mul; | ||
282 | div *= c->div; | ||
283 | } | ||
284 | p = c->parent; | ||
285 | } | ||
268 | 286 | ||
269 | ret = c->rate; | 287 | rate = c->rate; |
288 | rate *= mul; | ||
289 | do_div(rate, div); | ||
270 | 290 | ||
271 | spin_unlock_irqrestore(&clock_lock, flags); | 291 | return rate; |
272 | return ret; | ||
273 | } | 292 | } |
274 | EXPORT_SYMBOL(clk_get_rate); | ||
275 | 293 | ||
276 | long clk_round_rate(struct clk *c, unsigned long rate) | 294 | long clk_round_rate(struct clk *c, unsigned long rate) |
277 | { | 295 | { |
278 | if (!c->ops || !c->ops->round_rate) | 296 | unsigned long flags; |
279 | return -ENOSYS; | 297 | long ret; |
298 | |||
299 | spin_lock_irqsave(&c->spinlock, flags); | ||
300 | |||
301 | if (!c->ops || !c->ops->round_rate) { | ||
302 | ret = -ENOSYS; | ||
303 | goto out; | ||
304 | } | ||
280 | 305 | ||
281 | if (rate > c->max_rate) | 306 | if (rate > c->max_rate) |
282 | rate = c->max_rate; | 307 | rate = c->max_rate; |
283 | 308 | ||
284 | return c->ops->round_rate(c, rate); | 309 | ret = c->ops->round_rate(c, rate); |
310 | |||
311 | out: | ||
312 | spin_unlock_irqrestore(&c->spinlock, flags); | ||
313 | return ret; | ||
285 | } | 314 | } |
286 | EXPORT_SYMBOL(clk_round_rate); | 315 | EXPORT_SYMBOL(clk_round_rate); |
287 | 316 | ||
@@ -364,13 +393,75 @@ void __init tegra_init_clock(void)
364 | } | 393 | } |
365 | 394 | ||
366 | #ifdef CONFIG_DEBUG_FS | 395 | #ifdef CONFIG_DEBUG_FS |
396 | |||
397 | static int __clk_lock_all_spinlocks(void) | ||
398 | { | ||
399 | struct clk *c; | ||
400 | |||
401 | list_for_each_entry(c, &clocks, node) | ||
402 | if (!spin_trylock(&c->spinlock)) | ||
403 | goto unlock_spinlocks; | ||
404 | |||
405 | return 0; | ||
406 | |||
407 | unlock_spinlocks: | ||
408 | list_for_each_entry_continue_reverse(c, &clocks, node) | ||
409 | spin_unlock(&c->spinlock); | ||
410 | |||
411 | return -EAGAIN; | ||
412 | } | ||
413 | |||
414 | static void __clk_unlock_all_spinlocks(void) | ||
415 | { | ||
416 | struct clk *c; | ||
417 | |||
418 | list_for_each_entry_reverse(c, &clocks, node) | ||
419 | spin_unlock(&c->spinlock); | ||
420 | } | ||
421 | |||
422 | /* | ||
423 | * This function retries until it can take all locks, and may take | ||
424 | * an arbitrarily long time to complete. | ||
425 | * Must be called with irqs enabled, returns with irqs disabled | ||
426 | * Must be called with clock_list_lock held | ||
427 | */ | ||
428 | static void clk_lock_all(void) | ||
429 | { | ||
430 | int ret; | ||
431 | retry: | ||
432 | local_irq_disable(); | ||
433 | |||
434 | ret = __clk_lock_all_spinlocks(); | ||
435 | if (ret) | ||
436 | goto failed_spinlocks; | ||
437 | |||
438 | /* All locks taken successfully, return */ | ||
439 | return; | ||
440 | |||
441 | failed_spinlocks: | ||
442 | local_irq_enable(); | ||
443 | yield(); | ||
444 | goto retry; | ||
445 | } | ||
446 | |||
447 | /* | ||
448 | * Unlocks all clocks after a clk_lock_all | ||
449 | * Must be called with irqs disabled, returns with irqs enabled | ||
450 | * Must be called with clock_list_lock held | ||
451 | */ | ||
452 | static void clk_unlock_all(void) | ||
453 | { | ||
454 | __clk_unlock_all_spinlocks(); | ||
455 | |||
456 | local_irq_enable(); | ||
457 | } | ||
458 | |||
367 | static struct dentry *clk_debugfs_root; | 459 | static struct dentry *clk_debugfs_root; |
368 | 460 | ||
369 | 461 | ||
370 | static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level) | 462 | static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level) |
371 | { | 463 | { |
372 | struct clk *child; | 464 | struct clk *child; |
373 | struct clk *safe; | ||
374 | const char *state = "uninit"; | 465 | const char *state = "uninit"; |
375 | char div[8] = {0}; | 466 | char div[8] = {0}; |
376 | 467 | ||
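clk_lock_all()/clk_unlock_all() implement an all-or-nothing acquisition: try-lock every clock in list order, back out completely on the first failure, yield, and retry. The same pattern in a userspace model (pthread mutexes and sched_yield() standing in for the spinlocks and yield(); the array and function names are invented):

/*
 * Userspace model of the all-or-nothing acquisition used by clk_lock_all()
 * above: try-lock every lock in order, back out completely on the first
 * failure, yield, and retry.  The kernel code walks a list rather than an
 * array, and lock_all()/unlock_all() are invented names.
 */
#include <pthread.h>
#include <sched.h>

#define NLOCKS 3

static pthread_mutex_t locks[NLOCKS] = {
        PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER,
};

static void lock_all(void)
{
        int i;

retry:
        for (i = 0; i < NLOCKS; i++) {
                if (pthread_mutex_trylock(&locks[i]) != 0) {
                        while (--i >= 0)        /* drop everything we hold */
                                pthread_mutex_unlock(&locks[i]);
                        sched_yield();          /* let the current holder finish */
                        goto retry;
                }
        }
}

static void unlock_all(void)
{
        int i;

        for (i = NLOCKS - 1; i >= 0; i--)
                pthread_mutex_unlock(&locks[i]);
}

int main(void)
{
        lock_all();
        /* a consistent snapshot of all protected state could be taken here */
        unlock_all();
        return 0;
}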
@@ -401,8 +492,12 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
401 | c->rate > c->max_rate ? '!' : ' ', | 492 | c->rate > c->max_rate ? '!' : ' ', |
402 | !c->set ? '*' : ' ', | 493 | !c->set ? '*' : ' ', |
403 | 30 - level * 3, c->name, | 494 | 30 - level * 3, c->name, |
404 | state, c->refcnt, div, c->rate); | 495 | state, c->refcnt, div, clk_get_rate_all_locked(c)); |
405 | list_for_each_entry_safe(child, safe, &c->children, sibling) { | 496 | |
497 | list_for_each_entry(child, &clocks, node) { | ||
498 | if (child->parent != c) | ||
499 | continue; | ||
500 | |||
406 | clock_tree_show_one(s, child, level + 1); | 501 | clock_tree_show_one(s, child, level + 1); |
407 | } | 502 | } |
408 | } | 503 | } |
@@ -410,14 +505,20 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
410 | static int clock_tree_show(struct seq_file *s, void *data) | 505 | static int clock_tree_show(struct seq_file *s, void *data) |
411 | { | 506 | { |
412 | struct clk *c; | 507 | struct clk *c; |
413 | unsigned long flags; | ||
414 | seq_printf(s, " clock state ref div rate\n"); | 508 | seq_printf(s, " clock state ref div rate\n"); |
415 | seq_printf(s, "--------------------------------------------------------------\n"); | 509 | seq_printf(s, "--------------------------------------------------------------\n"); |
416 | spin_lock_irqsave(&clock_lock, flags); | 510 | |
511 | mutex_lock(&clock_list_lock); | ||
512 | |||
513 | clk_lock_all(); | ||
514 | |||
417 | list_for_each_entry(c, &clocks, node) | 515 | list_for_each_entry(c, &clocks, node) |
418 | if (c->parent == NULL) | 516 | if (c->parent == NULL) |
419 | clock_tree_show_one(s, c, 0); | 517 | clock_tree_show_one(s, c, 0); |
420 | spin_unlock_irqrestore(&clock_lock, flags); | 518 | |
519 | clk_unlock_all(); | ||
520 | |||
521 | mutex_unlock(&clock_list_lock); | ||
421 | return 0; | 522 | return 0; |
422 | } | 523 | } |
423 | 524 | ||
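The tree produced by clock_tree_show() is exposed through debugfs; the directory and file are registered later in this file, outside this diff. Assuming the usual /sys/kernel/debug mount point and a clock/clock_tree path, it reads like any other text file; a minimal userspace reader:

/*
 * Minimal userspace reader for the tree printed by clock_tree_show().  The
 * path assumes debugfs is mounted at /sys/kernel/debug and that the
 * directory/file registered elsewhere in this file are named
 * clock/clock_tree; adjust if your setup differs.
 */
#include <stdio.h>

int main(void)
{
        char buf[256];
        FILE *f = fopen("/sys/kernel/debug/clock/clock_tree", "r");

        if (!f) {
                perror("clock_tree");
                return 1;
        }
        while (fgets(buf, sizeof(buf), f))
                fputs(buf, stdout);
        fclose(f);
        return 0;
}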