diff options
Diffstat (limited to 'arch/sh/kernel/cpu/clock.c')
-rw-r--r-- | arch/sh/kernel/cpu/clock.c | 287 |
1 files changed, 287 insertions, 0 deletions
diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c new file mode 100644 index 000000000000..989e7fdd524d --- /dev/null +++ b/arch/sh/kernel/cpu/clock.c | |||
@@ -0,0 +1,287 @@ | |||
1 | /* | ||
2 | * arch/sh/kernel/cpu/clock.c - SuperH clock framework | ||
3 | * | ||
4 | * Copyright (C) 2005 Paul Mundt | ||
5 | * | ||
6 | * This clock framework is derived from the OMAP version by: | ||
7 | * | ||
8 | * Copyright (C) 2004 Nokia Corporation | ||
9 | * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> | ||
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/list.h> | ||
19 | #include <linux/kref.h> | ||
20 | #include <linux/seq_file.h> | ||
21 | #include <linux/err.h> | ||
22 | #include <asm/clock.h> | ||
23 | #include <asm/timer.h> | ||
24 | |||
static LIST_HEAD(clock_list);		/* all clocks known to the framework */
static DEFINE_SPINLOCK(clock_lock);	/* serializes enable/disable/rate ops */
static DECLARE_MUTEX(clock_list_sem);	/* guards clock_list add/del/walk */
28 | |||
/*
 * Each subtype is expected to define the init routines for these clocks,
 * as each subtype (or processor family) will have these clocks at the
 * very least. These are all provided through the CPG, which even some of
 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
 *
 * The processor-specific code is expected to register any additional
 * clock sources that are of interest.
 */

/* Root of the clock tree; rate changes here fan out to the children. */
static struct clk master_clk = {
	.name		= "master_clk",
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
#ifdef CONFIG_SH_PCLK_FREQ_BOOL
	/* Board-configured peripheral clock frequency, in Hz. */
	.rate		= CONFIG_SH_PCLK_FREQ,
#endif
};
45 | |||
/* Peripheral module clock, derived from the master clock. */
static struct clk module_clk = {
	.name		= "module_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
};
51 | |||
/* Bus clock, derived from the master clock. */
static struct clk bus_clk = {
	.name		= "bus_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
};
57 | |||
/* CPU core clock; a leaf, so no CLK_RATE_PROPAGATES. */
static struct clk cpu_clk = {
	.name		= "cpu_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED,
};
63 | |||
/*
 * The ordering of these clocks matters, do not change it.
 *
 * clk_init() passes each clock's array index to arch_init_clk_ops() as
 * its "type" argument, so the index here is part of the arch interface.
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&module_clk,
	&bus_clk,
	&cpu_clk,
};
73 | |||
74 | static void propagate_rate(struct clk *clk) | ||
75 | { | ||
76 | struct clk *clkp; | ||
77 | |||
78 | list_for_each_entry(clkp, &clock_list, node) { | ||
79 | if (likely(clkp->parent != clk)) | ||
80 | continue; | ||
81 | if (likely(clkp->ops && clkp->ops->recalc)) | ||
82 | clkp->ops->recalc(clkp); | ||
83 | } | ||
84 | } | ||
85 | |||
/*
 * __clk_enable - enable a clock, clock_lock held by the caller
 * @clk: clock to enable
 *
 * Lock-free inner helper; callers go through clk_enable(), which takes
 * clock_lock around this.  Always returns 0.
 */
int __clk_enable(struct clk *clk)
{
	/*
	 * See if this is the first time we're enabling the clock, some
	 * clocks that are always enabled still require "special"
	 * initialization. This is especially true if the clock mode
	 * changes and the clock needs to hunt for the proper set of
	 * divisors to use before it can effectively recalc.
	 *
	 * NOTE(review): kref_init() leaves the refcount at 1, and
	 * CLK_ALWAYS_ENABLED clocks return below before kref_get(), so
	 * for those clocks the count stays at 1 and ->init() re-runs on
	 * every enable -- confirm that this is intentional.
	 */
	if (unlikely(atomic_read(&clk->kref.refcount) == 1))
		if (clk->ops && clk->ops->init)
			clk->ops->init(clk);

	/* Always-on clocks need no hardware enable and no refcounting. */
	if (clk->flags & CLK_ALWAYS_ENABLED)
		return 0;

	if (likely(clk->ops && clk->ops->enable))
		clk->ops->enable(clk);

	/* Record the enable so __clk_disable() can balance it. */
	kref_get(&clk->kref);
	return 0;
}
108 | |||
109 | int clk_enable(struct clk *clk) | ||
110 | { | ||
111 | unsigned long flags; | ||
112 | int ret; | ||
113 | |||
114 | spin_lock_irqsave(&clock_lock, flags); | ||
115 | ret = __clk_enable(clk); | ||
116 | spin_unlock_irqrestore(&clock_lock, flags); | ||
117 | |||
118 | return ret; | ||
119 | } | ||
120 | |||
/*
 * clk_kref_release - kref release callback for clock refcounts
 *
 * The kref is used purely as an enable count; nothing is freed when it
 * drops to zero.
 */
static void clk_kref_release(struct kref *kref)
{
	/* Nothing to do */
}
125 | |||
126 | void __clk_disable(struct clk *clk) | ||
127 | { | ||
128 | if (clk->flags & CLK_ALWAYS_ENABLED) | ||
129 | return; | ||
130 | |||
131 | kref_put(&clk->kref, clk_kref_release); | ||
132 | } | ||
133 | |||
134 | void clk_disable(struct clk *clk) | ||
135 | { | ||
136 | unsigned long flags; | ||
137 | |||
138 | spin_lock_irqsave(&clock_lock, flags); | ||
139 | __clk_disable(clk); | ||
140 | spin_unlock_irqrestore(&clock_lock, flags); | ||
141 | } | ||
142 | |||
143 | int clk_register(struct clk *clk) | ||
144 | { | ||
145 | down(&clock_list_sem); | ||
146 | |||
147 | list_add(&clk->node, &clock_list); | ||
148 | kref_init(&clk->kref); | ||
149 | |||
150 | up(&clock_list_sem); | ||
151 | |||
152 | return 0; | ||
153 | } | ||
154 | |||
155 | void clk_unregister(struct clk *clk) | ||
156 | { | ||
157 | down(&clock_list_sem); | ||
158 | list_del(&clk->node); | ||
159 | up(&clock_list_sem); | ||
160 | } | ||
161 | |||
/*
 * clk_get_rate - return the cached rate of @clk, in Hz
 *
 * Reads only the cached ->rate field; no hardware access is performed.
 */
inline unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
166 | |||
/*
 * clk_set_rate - program a new rate via the clock's ->set_rate() op
 * @clk: clock to change
 * @rate: requested rate, in Hz
 *
 * Returns -EOPNOTSUPP when the clock has no set_rate op, otherwise
 * whatever the op returned.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EOPNOTSUPP;

	if (likely(clk->ops && clk->ops->set_rate)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		ret = clk->ops->set_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	/*
	 * NOTE(review): propagation runs outside clock_lock, and runs
	 * even when set_rate failed or was unsupported -- confirm both
	 * are intended.
	 */
	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);

	return ret;
}
184 | |||
185 | void clk_recalc_rate(struct clk *clk) | ||
186 | { | ||
187 | if (likely(clk->ops && clk->ops->recalc)) { | ||
188 | unsigned long flags; | ||
189 | |||
190 | spin_lock_irqsave(&clock_lock, flags); | ||
191 | clk->ops->recalc(clk); | ||
192 | spin_unlock_irqrestore(&clock_lock, flags); | ||
193 | } | ||
194 | |||
195 | if (unlikely(clk->flags & CLK_RATE_PROPAGATES)) | ||
196 | propagate_rate(clk); | ||
197 | } | ||
198 | |||
199 | struct clk *clk_get(const char *id) | ||
200 | { | ||
201 | struct clk *p, *clk = ERR_PTR(-ENOENT); | ||
202 | |||
203 | down(&clock_list_sem); | ||
204 | list_for_each_entry(p, &clock_list, node) { | ||
205 | if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) { | ||
206 | clk = p; | ||
207 | break; | ||
208 | } | ||
209 | } | ||
210 | up(&clock_list_sem); | ||
211 | |||
212 | return clk; | ||
213 | } | ||
214 | |||
215 | void clk_put(struct clk *clk) | ||
216 | { | ||
217 | if (clk && !IS_ERR(clk)) | ||
218 | module_put(clk->owner); | ||
219 | } | ||
220 | |||
/*
 * arch_init_clk_ops - weak default for CPU-specific clock op setup
 * @ops: where to install the clk_ops for clock number @type
 * @type: index into onchip_clocks[] (see clk_init())
 *
 * Processor subtypes override this to install their CPG operations;
 * the weak default leaves *ops untouched.
 */
void __init __attribute__ ((weak))
arch_init_clk_ops(struct clk_ops **ops, int type)
{
}
225 | |||
226 | int __init clk_init(void) | ||
227 | { | ||
228 | int i, ret = 0; | ||
229 | |||
230 | if (unlikely(!master_clk.rate)) | ||
231 | /* | ||
232 | * NOTE: This will break if the default divisor has been | ||
233 | * changed. | ||
234 | * | ||
235 | * No one should be changing the default on us however, | ||
236 | * expect that a sane value for CONFIG_SH_PCLK_FREQ will | ||
237 | * be defined in the event of a different divisor. | ||
238 | */ | ||
239 | master_clk.rate = get_timer_frequency() * 4; | ||
240 | |||
241 | for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) { | ||
242 | struct clk *clk = onchip_clocks[i]; | ||
243 | |||
244 | arch_init_clk_ops(&clk->ops, i); | ||
245 | ret |= clk_register(clk); | ||
246 | clk_enable(clk); | ||
247 | } | ||
248 | |||
249 | /* Kick the child clocks.. */ | ||
250 | propagate_rate(&master_clk); | ||
251 | propagate_rate(&bus_clk); | ||
252 | |||
253 | return ret; | ||
254 | } | ||
255 | |||
/*
 * show_clocks - print every registered clock's rate into a seq_file
 * @m: seq_file to print into
 *
 * Walks clock_list in reverse registration order, printing each clock
 * as "name : MM.NNMHz".  Always returns 0.
 *
 * NOTE(review): the list is traversed without clock_list_sem held, so
 * a concurrent clk_register()/clk_unregister() could race -- verify
 * that callers serialize against registration.
 */
int show_clocks(struct seq_file *m)
{
	struct clk *clk;

	list_for_each_entry_reverse(clk, &clock_list, node) {
		unsigned long rate = clk_get_rate(clk);

		/*
		 * Don't bother listing dummy clocks with no ancestry
		 * that only support enable and disable ops.
		 */
		if (unlikely(!rate && !clk->parent))
			continue;

		seq_printf(m, "%-12s\t: %ld.%02ldMHz\n", clk->name,
			   rate / 1000000, (rate % 1000000) / 10000);
	}

	return 0;
}
276 | |||
/* Public clock API, exported to GPL modules. */
EXPORT_SYMBOL_GPL(clk_register);
EXPORT_SYMBOL_GPL(clk_unregister);
EXPORT_SYMBOL_GPL(clk_get);
EXPORT_SYMBOL_GPL(clk_put);
EXPORT_SYMBOL_GPL(clk_enable);
EXPORT_SYMBOL_GPL(clk_disable);
EXPORT_SYMBOL_GPL(__clk_enable);
EXPORT_SYMBOL_GPL(__clk_disable);
EXPORT_SYMBOL_GPL(clk_get_rate);
EXPORT_SYMBOL_GPL(clk_set_rate);
EXPORT_SYMBOL_GPL(clk_recalc_rate);