Diffstat (limited to 'arch/c6x/platforms')
-rw-r--r--  arch/c6x/platforms/Kconfig       |  16
-rw-r--r--  arch/c6x/platforms/Makefile      |  12
-rw-r--r--  arch/c6x/platforms/cache.c       | 445
-rw-r--r--  arch/c6x/platforms/dscr.c        | 598
-rw-r--r--  arch/c6x/platforms/emif.c        |  87
-rw-r--r--  arch/c6x/platforms/megamod-pic.c | 349
-rw-r--r--  arch/c6x/platforms/platform.c    |  17
-rw-r--r--  arch/c6x/platforms/pll.c         | 444
-rw-r--r--  arch/c6x/platforms/plldata.c     | 404
-rw-r--r--  arch/c6x/platforms/timer64.c     | 244
10 files changed, 2616 insertions, 0 deletions
diff --git a/arch/c6x/platforms/Kconfig b/arch/c6x/platforms/Kconfig
new file mode 100644
index 000000000000..401ee678fd01
--- /dev/null
+++ b/arch/c6x/platforms/Kconfig
@@ -0,0 +1,16 @@
1 | |||
2 | config SOC_TMS320C6455 | ||
3 | bool "TMS320C6455" | ||
4 | default n | ||
5 | |||
6 | config SOC_TMS320C6457 | ||
7 | bool "TMS320C6457" | ||
8 | default n | ||
9 | |||
10 | config SOC_TMS320C6472 | ||
11 | bool "TMS320C6472" | ||
12 | default n | ||
13 | |||
14 | config SOC_TMS320C6474 | ||
15 | bool "TMS320C6474" | ||
16 | default n | ||
diff --git a/arch/c6x/platforms/Makefile b/arch/c6x/platforms/Makefile
new file mode 100644
index 000000000000..9a95b9bca8d0
--- /dev/null
+++ b/arch/c6x/platforms/Makefile
@@ -0,0 +1,12 @@
1 | # | ||
2 | # Makefile for arch/c6x/platforms | ||
3 | # | ||
4 | # Copyright 2010, 2011 Texas Instruments Incorporated | ||
5 | # | ||
6 | |||
7 | obj-y = platform.o cache.o megamod-pic.o pll.o plldata.o timer64.o | ||
8 | obj-y += dscr.o | ||
9 | |||
10 | # SoC objects | ||
11 | obj-$(CONFIG_SOC_TMS320C6455) += emif.o | ||
12 | obj-$(CONFIG_SOC_TMS320C6457) += emif.o | ||
diff --git a/arch/c6x/platforms/cache.c b/arch/c6x/platforms/cache.c
new file mode 100644
index 000000000000..86318a16a252
--- /dev/null
+++ b/arch/c6x/platforms/cache.c
@@ -0,0 +1,445 @@
1 | /* | ||
2 | * Copyright (C) 2011 Texas Instruments Incorporated | ||
3 | * Author: Mark Salter <msalter@redhat.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | #include <linux/of.h> | ||
10 | #include <linux/of_address.h> | ||
11 | #include <linux/io.h> | ||
12 | |||
13 | #include <asm/cache.h> | ||
14 | #include <asm/soc.h> | ||
15 | |||
16 | /* | ||
17 | * Internal Memory Control Registers for caches | ||
18 | */ | ||
19 | #define IMCR_CCFG 0x0000 | ||
20 | #define IMCR_L1PCFG 0x0020 | ||
21 | #define IMCR_L1PCC 0x0024 | ||
22 | #define IMCR_L1DCFG 0x0040 | ||
23 | #define IMCR_L1DCC 0x0044 | ||
24 | #define IMCR_L2ALLOC0 0x2000 | ||
25 | #define IMCR_L2ALLOC1 0x2004 | ||
26 | #define IMCR_L2ALLOC2 0x2008 | ||
27 | #define IMCR_L2ALLOC3 0x200c | ||
28 | #define IMCR_L2WBAR 0x4000 | ||
29 | #define IMCR_L2WWC 0x4004 | ||
30 | #define IMCR_L2WIBAR 0x4010 | ||
31 | #define IMCR_L2WIWC 0x4014 | ||
32 | #define IMCR_L2IBAR 0x4018 | ||
33 | #define IMCR_L2IWC 0x401c | ||
34 | #define IMCR_L1PIBAR 0x4020 | ||
35 | #define IMCR_L1PIWC 0x4024 | ||
36 | #define IMCR_L1DWIBAR 0x4030 | ||
37 | #define IMCR_L1DWIWC 0x4034 | ||
38 | #define IMCR_L1DWBAR 0x4040 | ||
39 | #define IMCR_L1DWWC 0x4044 | ||
40 | #define IMCR_L1DIBAR 0x4048 | ||
41 | #define IMCR_L1DIWC 0x404c | ||
42 | #define IMCR_L2WB 0x5000 | ||
43 | #define IMCR_L2WBINV 0x5004 | ||
44 | #define IMCR_L2INV 0x5008 | ||
45 | #define IMCR_L1PINV 0x5028 | ||
46 | #define IMCR_L1DWB 0x5040 | ||
47 | #define IMCR_L1DWBINV 0x5044 | ||
48 | #define IMCR_L1DINV 0x5048 | ||
49 | #define IMCR_MAR_BASE 0x8000 | ||
50 | #define IMCR_MAR96_111 0x8180 | ||
51 | #define IMCR_MAR128_191 0x8200 | ||
52 | #define IMCR_MAR224_239 0x8380 | ||
53 | #define IMCR_L2MPFAR 0xa000 | ||
54 | #define IMCR_L2MPFSR 0xa004 | ||
55 | #define IMCR_L2MPFCR 0xa008 | ||
56 | #define IMCR_L2MPLK0 0xa100 | ||
57 | #define IMCR_L2MPLK1 0xa104 | ||
58 | #define IMCR_L2MPLK2 0xa108 | ||
59 | #define IMCR_L2MPLK3 0xa10c | ||
60 | #define IMCR_L2MPLKCMD 0xa110 | ||
61 | #define IMCR_L2MPLKSTAT 0xa114 | ||
62 | #define IMCR_L2MPPA_BASE 0xa200 | ||
63 | #define IMCR_L1PMPFAR 0xa400 | ||
64 | #define IMCR_L1PMPFSR 0xa404 | ||
65 | #define IMCR_L1PMPFCR 0xa408 | ||
66 | #define IMCR_L1PMPLK0 0xa500 | ||
67 | #define IMCR_L1PMPLK1 0xa504 | ||
68 | #define IMCR_L1PMPLK2 0xa508 | ||
69 | #define IMCR_L1PMPLK3 0xa50c | ||
70 | #define IMCR_L1PMPLKCMD 0xa510 | ||
71 | #define IMCR_L1PMPLKSTAT 0xa514 | ||
72 | #define IMCR_L1PMPPA_BASE 0xa600 | ||
73 | #define IMCR_L1DMPFAR 0xac00 | ||
74 | #define IMCR_L1DMPFSR 0xac04 | ||
75 | #define IMCR_L1DMPFCR 0xac08 | ||
76 | #define IMCR_L1DMPLK0 0xad00 | ||
77 | #define IMCR_L1DMPLK1 0xad04 | ||
78 | #define IMCR_L1DMPLK2 0xad08 | ||
79 | #define IMCR_L1DMPLK3 0xad0c | ||
80 | #define IMCR_L1DMPLKCMD 0xad10 | ||
81 | #define IMCR_L1DMPLKSTAT 0xad14 | ||
82 | #define IMCR_L1DMPPA_BASE 0xae00 | ||
83 | #define IMCR_L2PDWAKE0 0xc040 | ||
84 | #define IMCR_L2PDWAKE1 0xc044 | ||
85 | #define IMCR_L2PDSLEEP0 0xc050 | ||
86 | #define IMCR_L2PDSLEEP1 0xc054 | ||
87 | #define IMCR_L2PDSTAT0 0xc060 | ||
88 | #define IMCR_L2PDSTAT1 0xc064 | ||
89 | |||
90 | /* | ||
91 | * CCFG register values and bits | ||
92 | */ | ||
93 | #define L2MODE_0K_CACHE 0x0 | ||
94 | #define L2MODE_32K_CACHE 0x1 | ||
95 | #define L2MODE_64K_CACHE 0x2 | ||
96 | #define L2MODE_128K_CACHE 0x3 | ||
97 | #define L2MODE_256K_CACHE 0x7 | ||
98 | |||
99 | #define L2PRIO_URGENT 0x0 | ||
100 | #define L2PRIO_HIGH 0x1 | ||
101 | #define L2PRIO_MEDIUM 0x2 | ||
102 | #define L2PRIO_LOW 0x3 | ||
103 | |||
104 | #define CCFG_ID 0x100 /* Invalidate L1D bit */ | ||
105 | #define CCFG_IP 0x200 /* Invalidate L1P bit */ | ||
106 | |||
107 | static void __iomem *cache_base; | ||
108 | |||
109 | /* | ||
110 | * L1 & L2 caches generic functions | ||
111 | */ | ||
112 | #define imcr_get(reg) soc_readl(cache_base + (reg)) | ||
113 | #define imcr_set(reg, value) \ | ||
114 | do { \ | ||
115 | soc_writel((value), cache_base + (reg)); \ | ||
116 | soc_readl(cache_base + (reg)); \ | ||
117 | } while (0) | ||
118 | |||
119 | static void cache_block_operation_wait(unsigned int wc_reg) | ||
120 | { | ||
121 | /* Wait for completion */ | ||
122 | while (imcr_get(wc_reg)) | ||
123 | cpu_relax(); | ||
124 | } | ||
125 | |||
126 | static DEFINE_SPINLOCK(cache_lock); | ||
127 | |||
128 | /* | ||
129 | * Generic function to perform a block cache operation as | ||
130 | * invalidate or writeback/invalidate | ||
131 | */ | ||
132 | static void cache_block_operation(unsigned int *start, | ||
133 | unsigned int *end, | ||
134 | unsigned int bar_reg, | ||
135 | unsigned int wc_reg) | ||
136 | { | ||
137 | unsigned long flags; | ||
138 | unsigned int wcnt = | ||
139 | (L2_CACHE_ALIGN_CNT((unsigned int) end) | ||
140 | - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2; | ||
141 | unsigned int wc = 0; | ||
142 | |||
143 | for (; wcnt; wcnt -= wc, start += wc) { | ||
144 | loop: | ||
145 | spin_lock_irqsave(&cache_lock, flags); | ||
146 | |||
147 | /* | ||
148 | * If another cache operation is occurring | ||
149 | */ | ||
150 | if (unlikely(imcr_get(wc_reg))) { | ||
151 | spin_unlock_irqrestore(&cache_lock, flags); | ||
152 | |||
153 | /* Wait for previous operation completion */ | ||
154 | cache_block_operation_wait(wc_reg); | ||
155 | |||
156 | /* Try again */ | ||
157 | goto loop; | ||
158 | } | ||
159 | |||
160 | imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start)); | ||
161 | |||
162 | if (wcnt > 0xffff) | ||
163 | wc = 0xffff; | ||
164 | else | ||
165 | wc = wcnt; | ||
166 | |||
167 | /* Set word count value in the WC register */ | ||
168 | imcr_set(wc_reg, wc & 0xffff); | ||
169 | |||
170 | spin_unlock_irqrestore(&cache_lock, flags); | ||
171 | |||
172 | /* Wait for completion */ | ||
173 | cache_block_operation_wait(wc_reg); | ||
174 | } | ||
175 | } | ||
176 | |||
177 | static void cache_block_operation_nowait(unsigned int *start, | ||
178 | unsigned int *end, | ||
179 | unsigned int bar_reg, | ||
180 | unsigned int wc_reg) | ||
181 | { | ||
182 | unsigned long flags; | ||
183 | unsigned int wcnt = | ||
184 | (L2_CACHE_ALIGN_CNT((unsigned int) end) | ||
185 | - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2; | ||
186 | unsigned int wc = 0; | ||
187 | |||
188 | for (; wcnt; wcnt -= wc, start += wc) { | ||
189 | |||
190 | spin_lock_irqsave(&cache_lock, flags); | ||
191 | |||
192 | imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start)); | ||
193 | |||
194 | if (wcnt > 0xffff) | ||
195 | wc = 0xffff; | ||
196 | else | ||
197 | wc = wcnt; | ||
198 | |||
199 | /* Set word count value in the WC register */ | ||
200 | imcr_set(wc_reg, wc & 0xffff); | ||
201 | |||
202 | spin_unlock_irqrestore(&cache_lock, flags); | ||
203 | |||
204 | /* Don't wait for completion on last cache operation */ | ||
205 | if (wcnt > 0xffff) | ||
206 | cache_block_operation_wait(wc_reg); | ||
207 | } | ||
208 | } | ||
209 | |||
210 | /* | ||
211 | * L1 caches management | ||
212 | */ | ||
213 | |||
214 | /* | ||
215 | * Disable L1 caches | ||
216 | */ | ||
217 | void L1_cache_off(void) | ||
218 | { | ||
219 | unsigned int dummy; | ||
220 | |||
221 | imcr_set(IMCR_L1PCFG, 0); | ||
222 | dummy = imcr_get(IMCR_L1PCFG); | ||
223 | |||
224 | imcr_set(IMCR_L1DCFG, 0); | ||
225 | dummy = imcr_get(IMCR_L1DCFG); | ||
226 | } | ||
227 | |||
228 | /* | ||
229 | * Enable L1 caches | ||
230 | */ | ||
231 | void L1_cache_on(void) | ||
232 | { | ||
233 | unsigned int dummy; | ||
234 | |||
235 | imcr_set(IMCR_L1PCFG, 7); | ||
236 | dummy = imcr_get(IMCR_L1PCFG); | ||
237 | |||
238 | imcr_set(IMCR_L1DCFG, 7); | ||
239 | dummy = imcr_get(IMCR_L1DCFG); | ||
240 | } | ||
241 | |||
242 | /* | ||
243 | * L1P global-invalidate all | ||
244 | */ | ||
245 | void L1P_cache_global_invalidate(void) | ||
246 | { | ||
247 | unsigned int set = 1; | ||
248 | imcr_set(IMCR_L1PINV, set); | ||
249 | while (imcr_get(IMCR_L1PINV) & 1) | ||
250 | cpu_relax(); | ||
251 | } | ||
252 | |||
253 | /* | ||
254 | * L1D global-invalidate all | ||
255 | * | ||
256 | * Warning: this operation causes all updated data in L1D to | ||
257 | * be discarded rather than written back to the lower levels of | ||
258 | * memory | ||
259 | */ | ||
260 | void L1D_cache_global_invalidate(void) | ||
261 | { | ||
262 | unsigned int set = 1; | ||
263 | imcr_set(IMCR_L1DINV, set); | ||
264 | while (imcr_get(IMCR_L1DINV) & 1) | ||
265 | cpu_relax(); | ||
266 | } | ||
267 | |||
268 | void L1D_cache_global_writeback(void) | ||
269 | { | ||
270 | unsigned int set = 1; | ||
271 | imcr_set(IMCR_L1DWB, set); | ||
272 | while (imcr_get(IMCR_L1DWB) & 1) | ||
273 | cpu_relax(); | ||
274 | } | ||
275 | |||
276 | void L1D_cache_global_writeback_invalidate(void) | ||
277 | { | ||
278 | unsigned int set = 1; | ||
279 | imcr_set(IMCR_L1DWBINV, set); | ||
280 | while (imcr_get(IMCR_L1DWBINV) & 1) | ||
281 | cpu_relax(); | ||
282 | } | ||
283 | |||
284 | /* | ||
285 | * L2 caches management | ||
286 | */ | ||
287 | |||
288 | /* | ||
289 | * Set L2 operation mode | ||
290 | */ | ||
291 | void L2_cache_set_mode(unsigned int mode) | ||
292 | { | ||
293 | unsigned int ccfg = imcr_get(IMCR_CCFG); | ||
294 | |||
295 | /* Clear and set the L2MODE bits in CCFG */ | ||
296 | ccfg &= ~7; | ||
297 | ccfg |= (mode & 7); | ||
298 | imcr_set(IMCR_CCFG, ccfg); | ||
299 | ccfg = imcr_get(IMCR_CCFG); | ||
300 | } | ||
301 | |||
302 | /* | ||
303 | * L2 global-writeback and global-invalidate all | ||
304 | */ | ||
305 | void L2_cache_global_writeback_invalidate(void) | ||
306 | { | ||
307 | imcr_set(IMCR_L2WBINV, 1); | ||
308 | while (imcr_get(IMCR_L2WBINV)) | ||
309 | cpu_relax(); | ||
310 | } | ||
311 | |||
312 | /* | ||
313 | * L2 global-writeback all | ||
314 | */ | ||
315 | void L2_cache_global_writeback(void) | ||
316 | { | ||
317 | imcr_set(IMCR_L2WB, 1); | ||
318 | while (imcr_get(IMCR_L2WB)) | ||
319 | cpu_relax(); | ||
320 | } | ||
321 | |||
322 | /* | ||
323 | * Cacheability controls | ||
324 | */ | ||
325 | void enable_caching(unsigned long start, unsigned long end) | ||
326 | { | ||
327 | unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2); | ||
328 | unsigned int mar_e = IMCR_MAR_BASE + ((end >> 24) << 2); | ||
329 | |||
330 | for (; mar <= mar_e; mar += 4) | ||
331 | imcr_set(mar, imcr_get(mar) | 1); | ||
332 | } | ||
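/*
 * Worked example (addresses are illustrative): each MAR register covers a
 * 16MB region selected by address bits 31-24, so
 * enable_caching(0x80000000, 0x8fffffff) walks MAR128..MAR143, i.e.
 * offsets IMCR_MAR_BASE + 0x200 through IMCR_MAR_BASE + 0x23c.
 */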
333 | |||
334 | void disable_caching(unsigned long start, unsigned long end) | ||
335 | { | ||
336 | unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2); | ||
337 | unsigned int mar_e = IMCR_MAR_BASE + ((end >> 24) << 2); | ||
338 | |||
339 | for (; mar <= mar_e; mar += 4) | ||
340 | imcr_set(mar, imcr_get(mar) & ~1); | ||
341 | } | ||
342 | |||
343 | |||
344 | /* | ||
345 | * L1 block operations | ||
346 | */ | ||
347 | void L1P_cache_block_invalidate(unsigned int start, unsigned int end) | ||
348 | { | ||
349 | cache_block_operation((unsigned int *) start, | ||
350 | (unsigned int *) end, | ||
351 | IMCR_L1PIBAR, IMCR_L1PIWC); | ||
352 | } | ||
353 | |||
354 | void L1D_cache_block_invalidate(unsigned int start, unsigned int end) | ||
355 | { | ||
356 | cache_block_operation((unsigned int *) start, | ||
357 | (unsigned int *) end, | ||
358 | IMCR_L1DIBAR, IMCR_L1DIWC); | ||
359 | } | ||
360 | |||
361 | void L1D_cache_block_writeback_invalidate(unsigned int start, unsigned int end) | ||
362 | { | ||
363 | cache_block_operation((unsigned int *) start, | ||
364 | (unsigned int *) end, | ||
365 | IMCR_L1DWIBAR, IMCR_L1DWIWC); | ||
366 | } | ||
367 | |||
368 | void L1D_cache_block_writeback(unsigned int start, unsigned int end) | ||
369 | { | ||
370 | cache_block_operation((unsigned int *) start, | ||
371 | (unsigned int *) end, | ||
372 | IMCR_L1DWBAR, IMCR_L1DWWC); | ||
373 | } | ||
374 | |||
375 | /* | ||
376 | * L2 block operations | ||
377 | */ | ||
378 | void L2_cache_block_invalidate(unsigned int start, unsigned int end) | ||
379 | { | ||
380 | cache_block_operation((unsigned int *) start, | ||
381 | (unsigned int *) end, | ||
382 | IMCR_L2IBAR, IMCR_L2IWC); | ||
383 | } | ||
384 | |||
385 | void L2_cache_block_writeback(unsigned int start, unsigned int end) | ||
386 | { | ||
387 | cache_block_operation((unsigned int *) start, | ||
388 | (unsigned int *) end, | ||
389 | IMCR_L2WBAR, IMCR_L2WWC); | ||
390 | } | ||
391 | |||
392 | void L2_cache_block_writeback_invalidate(unsigned int start, unsigned int end) | ||
393 | { | ||
394 | cache_block_operation((unsigned int *) start, | ||
395 | (unsigned int *) end, | ||
396 | IMCR_L2WIBAR, IMCR_L2WIWC); | ||
397 | } | ||
398 | |||
399 | void L2_cache_block_invalidate_nowait(unsigned int start, unsigned int end) | ||
400 | { | ||
401 | cache_block_operation_nowait((unsigned int *) start, | ||
402 | (unsigned int *) end, | ||
403 | IMCR_L2IBAR, IMCR_L2IWC); | ||
404 | } | ||
405 | |||
406 | void L2_cache_block_writeback_nowait(unsigned int start, unsigned int end) | ||
407 | { | ||
408 | cache_block_operation_nowait((unsigned int *) start, | ||
409 | (unsigned int *) end, | ||
410 | IMCR_L2WBAR, IMCR_L2WWC); | ||
411 | } | ||
412 | |||
413 | void L2_cache_block_writeback_invalidate_nowait(unsigned int start, | ||
414 | unsigned int end) | ||
415 | { | ||
416 | cache_block_operation_nowait((unsigned int *) start, | ||
417 | (unsigned int *) end, | ||
418 | IMCR_L2WIBAR, IMCR_L2WIWC); | ||
419 | } | ||
420 | |||
421 | |||
422 | /* | ||
423 | * L1 and L2 caches configuration | ||
424 | */ | ||
425 | void __init c6x_cache_init(void) | ||
426 | { | ||
427 | struct device_node *node; | ||
428 | |||
429 | node = of_find_compatible_node(NULL, NULL, "ti,c64x+cache"); | ||
430 | if (!node) | ||
431 | return; | ||
432 | |||
433 | cache_base = of_iomap(node, 0); | ||
434 | |||
435 | of_node_put(node); | ||
436 | |||
437 | if (!cache_base) | ||
438 | return; | ||
439 | |||
440 | /* Set L2 cache over the whole L2 SRAM memory */ | ||
441 | L2_cache_set_mode(L2MODE_SIZE); | ||
442 | |||
443 | /* Enable L1 */ | ||
444 | L1_cache_on(); | ||
445 | } | ||
diff --git a/arch/c6x/platforms/dscr.c b/arch/c6x/platforms/dscr.c
new file mode 100644
index 000000000000..f848a65ee646
--- /dev/null
+++ b/arch/c6x/platforms/dscr.c
@@ -0,0 +1,598 @@
1 | /* | ||
2 | * Device State Control Registers driver | ||
3 | * | ||
4 | * Copyright (C) 2011 Texas Instruments Incorporated | ||
5 | * Author: Mark Salter <msalter@redhat.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | /* | ||
13 | * The Device State Control Registers (DSCR) provide SoC level control over | ||
14 | * a number of peripherals. Details vary considerably among the various SoC | ||
15 | * parts. In general, the DSCR block will provide one or more configuration | ||
16 | * registers often protected by a lock register. One or more key values must | ||
17 | * be written to a lock register in order to unlock the configuration register. | ||
18 | * The configuration register may be used to enable (and disable in some | ||
19 | * cases) SoC pin drivers, peripheral clock sources (internal or pin), etc. | ||
20 | * In some cases, a configuration register is write once or the individual | ||
21 | * bits are write once. That is, you may be able to enable a device, but | ||
22 | * will not be able to disable it. | ||
23 | * | ||
24 | * In addition to device configuration, the DSCR block may provide registers | ||
25 | * which are used to reset SoC peripherals, provide device ID information, | ||
26 | * provide MAC addresses, and other miscellaneous functions. | ||
27 | */ | ||
28 | |||
29 | #include <linux/of.h> | ||
30 | #include <linux/of_address.h> | ||
31 | #include <linux/of_platform.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/io.h> | ||
34 | #include <linux/delay.h> | ||
35 | #include <asm/soc.h> | ||
36 | #include <asm/dscr.h> | ||
37 | |||
38 | #define MAX_DEVSTATE_IDS 32 | ||
39 | #define MAX_DEVCTL_REGS 8 | ||
40 | #define MAX_DEVSTAT_REGS 8 | ||
41 | #define MAX_LOCKED_REGS 4 | ||
42 | #define MAX_SOC_EMACS 2 | ||
43 | |||
44 | struct rmii_reset_reg { | ||
45 | u32 reg; | ||
46 | u32 mask; | ||
47 | }; | ||
48 | |||
49 | /* | ||
50 | * Some registers may be locked. In order to write to these | ||
51 | * registers, the key value must first be written to the lockreg. | ||
52 | */ | ||
53 | struct locked_reg { | ||
54 | u32 reg; /* offset from base */ | ||
55 | u32 lockreg; /* offset from base */ | ||
56 | u32 key; /* unlock key */ | ||
57 | }; | ||
58 | |||
59 | /* | ||
60 | * This describes a contiguous area of like control bits used to enable/disable | ||
61 | * SoC devices. Each controllable device is given an ID which is used by the | ||
62 | * individual device drivers to control the device state. These IDs start at | ||
63 | * zero and are assigned sequentially to the control bitfield ranges described | ||
64 | * by this structure. | ||
65 | */ | ||
66 | struct devstate_ctl_reg { | ||
67 | u32 reg; /* register holding the control bits */ | ||
68 | u8 start_id; /* start id of this range */ | ||
69 | u8 num_ids; /* number of devices in this range */ | ||
70 | u8 enable_only; /* bits are write-once to enable only */ | ||
71 | u8 enable; /* value used to enable device */ | ||
72 | u8 disable; /* value used to disable device */ | ||
73 | u8 shift; /* starting (rightmost) bit in range */ | ||
74 | u8 nbits; /* number of bits per device */ | ||
75 | }; | ||
76 | |||
77 | |||
78 | /* | ||
79 | * This describes a region of status bits indicating the state of | ||
80 | * various devices. This is used internally to wait for status | ||
81 | * change completion when enabling/disabling a device. Status is | ||
82 | * optional and not all device controls will have a corresponding | ||
83 | * status. | ||
84 | */ | ||
85 | struct devstate_stat_reg { | ||
86 | u32 reg; /* register holding the status bits */ | ||
87 | u8 start_id; /* start id of this range */ | ||
88 | u8 num_ids; /* number of devices in this range */ | ||
89 | u8 enable; /* value indicating enabled state */ | ||
90 | u8 disable; /* value indicating disabled state */ | ||
91 | u8 shift; /* starting (rightmost) bit in range */ | ||
92 | u8 nbits; /* number of bits per device */ | ||
93 | }; | ||
94 | |||
95 | struct devstate_info { | ||
96 | struct devstate_ctl_reg *ctl; | ||
97 | struct devstate_stat_reg *stat; | ||
98 | }; | ||
99 | |||
100 | /* These are callbacks to SOC-specific code. */ | ||
101 | struct dscr_ops { | ||
102 | void (*init)(struct device_node *node); | ||
103 | }; | ||
104 | |||
105 | struct dscr_regs { | ||
106 | spinlock_t lock; | ||
107 | void __iomem *base; | ||
108 | u32 kick_reg[2]; | ||
109 | u32 kick_key[2]; | ||
110 | struct locked_reg locked[MAX_LOCKED_REGS]; | ||
111 | struct devstate_info devstate_info[MAX_DEVSTATE_IDS]; | ||
112 | struct rmii_reset_reg rmii_resets[MAX_SOC_EMACS]; | ||
113 | struct devstate_ctl_reg devctl[MAX_DEVCTL_REGS]; | ||
114 | struct devstate_stat_reg devstat[MAX_DEVSTAT_REGS]; | ||
115 | }; | ||
116 | |||
117 | static struct dscr_regs dscr; | ||
118 | |||
119 | static struct locked_reg *find_locked_reg(u32 reg) | ||
120 | { | ||
121 | int i; | ||
122 | |||
123 | for (i = 0; i < MAX_LOCKED_REGS; i++) | ||
124 | if (dscr.locked[i].key && reg == dscr.locked[i].reg) | ||
125 | return &dscr.locked[i]; | ||
126 | return NULL; | ||
127 | } | ||
128 | |||
129 | /* | ||
130 | * Write to a register with one lock | ||
131 | */ | ||
132 | static void dscr_write_locked1(u32 reg, u32 val, | ||
133 | u32 lock, u32 key) | ||
134 | { | ||
135 | void __iomem *reg_addr = dscr.base + reg; | ||
136 | void __iomem *lock_addr = dscr.base + lock; | ||
137 | |||
138 | /* | ||
139 | * For some registers, the lock is relocked after a short number | ||
140 | * of cycles. We have to put the lock write and register write in | ||
141 | * the same fetch packet to meet this timing. The .align ensures | ||
142 | * the two stw instructions are in the same fetch packet. | ||
143 | */ | ||
144 | asm volatile ("b .s2 0f\n" | ||
145 | "nop 5\n" | ||
146 | " .align 5\n" | ||
147 | "0:\n" | ||
148 | "stw .D1T2 %3,*%2\n" | ||
149 | "stw .D1T2 %1,*%0\n" | ||
150 | : | ||
151 | : "a"(reg_addr), "b"(val), "a"(lock_addr), "b"(key) | ||
152 | ); | ||
153 | |||
154 | /* in case the hw doesn't reset the lock */ | ||
155 | soc_writel(0, lock_addr); | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * Write to a register protected by two lock registers | ||
160 | */ | ||
161 | static void dscr_write_locked2(u32 reg, u32 val, | ||
162 | u32 lock0, u32 key0, | ||
163 | u32 lock1, u32 key1) | ||
164 | { | ||
165 | soc_writel(key0, dscr.base + lock0); | ||
166 | soc_writel(key1, dscr.base + lock1); | ||
167 | soc_writel(val, dscr.base + reg); | ||
168 | soc_writel(0, dscr.base + lock0); | ||
169 | soc_writel(0, dscr.base + lock1); | ||
170 | } | ||
171 | |||
172 | static void dscr_write(u32 reg, u32 val) | ||
173 | { | ||
174 | struct locked_reg *lock; | ||
175 | |||
176 | lock = find_locked_reg(reg); | ||
177 | if (lock) | ||
178 | dscr_write_locked1(reg, val, lock->lockreg, lock->key); | ||
179 | else if (dscr.kick_key[0]) | ||
180 | dscr_write_locked2(reg, val, dscr.kick_reg[0], dscr.kick_key[0], | ||
181 | dscr.kick_reg[1], dscr.kick_key[1]); | ||
182 | else | ||
183 | soc_writel(val, dscr.base + reg); | ||
184 | } | ||
185 | |||
186 | |||
187 | /* | ||
188 | * Drivers can use this interface to enable/disable SoC IP blocks. | ||
189 | */ | ||
190 | void dscr_set_devstate(int id, enum dscr_devstate_t state) | ||
191 | { | ||
192 | struct devstate_ctl_reg *ctl; | ||
193 | struct devstate_stat_reg *stat; | ||
194 | struct devstate_info *info; | ||
195 | u32 ctl_val, val; | ||
196 | int ctl_shift, ctl_mask; | ||
197 | unsigned long flags; | ||
198 | |||
199 | if (!dscr.base) | ||
200 | return; | ||
201 | |||
202 | if (id < 0 || id >= MAX_DEVSTATE_IDS) | ||
203 | return; | ||
204 | |||
205 | info = &dscr.devstate_info[id]; | ||
206 | ctl = info->ctl; | ||
207 | stat = info->stat; | ||
208 | |||
209 | if (ctl == NULL) | ||
210 | return; | ||
211 | |||
212 | ctl_shift = ctl->shift + ctl->nbits * (id - ctl->start_id); | ||
213 | ctl_mask = ((1 << ctl->nbits) - 1) << ctl_shift; | ||
214 | |||
215 | switch (state) { | ||
216 | case DSCR_DEVSTATE_ENABLED: | ||
217 | ctl_val = ctl->enable << ctl_shift; | ||
218 | break; | ||
219 | case DSCR_DEVSTATE_DISABLED: | ||
220 | if (ctl->enable_only) | ||
221 | return; | ||
222 | ctl_val = ctl->disable << ctl_shift; | ||
223 | break; | ||
224 | default: | ||
225 | return; | ||
226 | } | ||
227 | |||
228 | spin_lock_irqsave(&dscr.lock, flags); | ||
229 | |||
230 | val = soc_readl(dscr.base + ctl->reg); | ||
231 | val &= ~ctl_mask; | ||
232 | val |= ctl_val; | ||
233 | |||
234 | dscr_write(ctl->reg, val); | ||
235 | |||
236 | spin_unlock_irqrestore(&dscr.lock, flags); | ||
237 | |||
238 | if (!stat) | ||
239 | return; | ||
240 | |||
241 | ctl_shift = stat->shift + stat->nbits * (id - stat->start_id); | ||
242 | |||
243 | if (state == DSCR_DEVSTATE_ENABLED) | ||
244 | ctl_val = stat->enable; | ||
245 | else | ||
246 | ctl_val = stat->disable; | ||
247 | |||
248 | do { | ||
249 | val = soc_readl(dscr.base + stat->reg); | ||
250 | val >>= ctl_shift; | ||
251 | val &= ((1 << stat->nbits) - 1); | ||
252 | } while (val != ctl_val); | ||
253 | } | ||
254 | EXPORT_SYMBOL(dscr_set_devstate); | ||
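/*
 * Usage sketch (illustrative): a peripheral driver that knows its DSCR
 * device id, e.g. from a "ti,dscr-dev-enable" property as emif.c below
 * does, can enable its pin drivers with:
 *
 *	u32 devid;
 *
 *	if (!of_property_read_u32_array(np, "ti,dscr-dev-enable", &devid, 1))
 *		dscr_set_devstate(devid, DSCR_DEVSTATE_ENABLED);
 */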
255 | |||
256 | /* | ||
257 | * Drivers can use this to reset RMII module. | ||
258 | */ | ||
259 | void dscr_rmii_reset(int id, int assert) | ||
260 | { | ||
261 | struct rmii_reset_reg *r; | ||
262 | unsigned long flags; | ||
263 | u32 val; | ||
264 | |||
265 | if (id < 0 || id >= MAX_SOC_EMACS) | ||
266 | return; | ||
267 | |||
268 | r = &dscr.rmii_resets[id]; | ||
269 | if (r->mask == 0) | ||
270 | return; | ||
271 | |||
272 | spin_lock_irqsave(&dscr.lock, flags); | ||
273 | |||
274 | val = soc_readl(dscr.base + r->reg); | ||
275 | if (assert) | ||
276 | dscr_write(r->reg, val | r->mask); | ||
277 | else | ||
278 | dscr_write(r->reg, val & ~(r->mask)); | ||
279 | |||
280 | spin_unlock_irqrestore(&dscr.lock, flags); | ||
281 | } | ||
282 | EXPORT_SYMBOL(dscr_rmii_reset); | ||
283 | |||
284 | static void __init dscr_parse_devstat(struct device_node *node, | ||
285 | void __iomem *base) | ||
286 | { | ||
287 | u32 val; | ||
288 | int err; | ||
289 | |||
290 | err = of_property_read_u32_array(node, "ti,dscr-devstat", &val, 1); | ||
291 | if (!err) | ||
292 | c6x_devstat = soc_readl(base + val); | ||
293 | printk(KERN_INFO "DEVSTAT: %08x\n", c6x_devstat); | ||
294 | } | ||
295 | |||
296 | static void __init dscr_parse_silicon_rev(struct device_node *node, | ||
297 | void __iomem *base) | ||
298 | { | ||
299 | u32 vals[3]; | ||
300 | int err; | ||
301 | |||
302 | err = of_property_read_u32_array(node, "ti,dscr-silicon-rev", vals, 3); | ||
303 | if (!err) { | ||
304 | c6x_silicon_rev = soc_readl(base + vals[0]); | ||
305 | c6x_silicon_rev >>= vals[1]; | ||
306 | c6x_silicon_rev &= vals[2]; | ||
307 | } | ||
308 | } | ||
309 | |||
310 | /* | ||
311 | * Some SoCs will have a pair of fuse registers which hold | ||
312 | * an ethernet MAC address. The "ti,dscr-mac-fuse-regs" | ||
313 | * property is a mapping from fuse register bytes to MAC | ||
314 | * address bytes. The expected format is: | ||
315 | * | ||
316 | * ti,dscr-mac-fuse-regs = <reg0 b3 b2 b1 b0 | ||
317 | * reg1 b3 b2 b1 b0> | ||
318 | * | ||
319 | * reg0 and reg1 are the offsets of the two fuse registers. | ||
320 | * b3-b0 positionally represent bytes within the fuse register. | ||
321 | * b3 is the most significant byte and b0 is the least. | ||
322 | * Allowable values for b3-b0 are: | ||
323 | * | ||
324 | * 0 = fuse register byte not used in MAC address | ||
325 | * 1-6 = index+1 into c6x_fuse_mac[] | ||
326 | */ | ||
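/*
 * Illustrative device tree fragment (offsets and byte positions are
 * made up):
 *
 *	ti,dscr-mac-fuse-regs = <0x700 1 2 3 4
 *				 0x704 5 6 0 0>;
 *
 * This would fill c6x_fuse_mac[0..3] from the register at offset 0x700
 * (most significant byte first) and c6x_fuse_mac[4..5] from the top two
 * bytes of the register at offset 0x704.
 */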
327 | static void __init dscr_parse_mac_fuse(struct device_node *node, | ||
328 | void __iomem *base) | ||
329 | { | ||
330 | u32 vals[10], fuse; | ||
331 | int f, i, j, err; | ||
332 | |||
333 | err = of_property_read_u32_array(node, "ti,dscr-mac-fuse-regs", | ||
334 | vals, 10); | ||
335 | if (err) | ||
336 | return; | ||
337 | |||
338 | for (f = 0; f < 2; f++) { | ||
339 | fuse = soc_readl(base + vals[f * 5]); | ||
340 | for (j = (f * 5) + 1, i = 24; i >= 0; i -= 8, j++) | ||
341 | if (vals[j] && vals[j] <= 6) | ||
342 | c6x_fuse_mac[vals[j] - 1] = fuse >> i; | ||
343 | } | ||
344 | } | ||
345 | |||
346 | static void __init dscr_parse_rmii_resets(struct device_node *node, | ||
347 | void __iomem *base) | ||
348 | { | ||
349 | const __be32 *p; | ||
350 | int i, size; | ||
351 | |||
352 | /* look for RMII reset registers */ | ||
353 | p = of_get_property(node, "ti,dscr-rmii-resets", &size); | ||
354 | if (p) { | ||
355 | /* parse all the reg/mask pairs we can handle */ | ||
356 | size /= (sizeof(*p) * 2); | ||
357 | if (size > MAX_SOC_EMACS) | ||
358 | size = MAX_SOC_EMACS; | ||
359 | |||
360 | for (i = 0; i < size; i++) { | ||
361 | dscr.rmii_resets[i].reg = be32_to_cpup(p++); | ||
362 | dscr.rmii_resets[i].mask = be32_to_cpup(p++); | ||
363 | } | ||
364 | } | ||
365 | } | ||
366 | |||
367 | |||
368 | static void __init dscr_parse_privperm(struct device_node *node, | ||
369 | void __iomem *base) | ||
370 | { | ||
371 | u32 vals[2]; | ||
372 | int err; | ||
373 | |||
374 | err = of_property_read_u32_array(node, "ti,dscr-privperm", vals, 2); | ||
375 | if (err) | ||
376 | return; | ||
377 | dscr_write(vals[0], vals[1]); | ||
378 | } | ||
379 | |||
380 | /* | ||
381 | * SoCs may have "locked" DSCR registers which can only be written | ||
382 | * to after writing a key value to a lock register. These | ||
383 | * registers can be described with the "ti,dscr-locked-regs" property. | ||
384 | * This property provides a list of register descriptions with each | ||
385 | * description consisting of three values. | ||
386 | * | ||
387 | * ti,dscr-locked-regs = <reg0 lockreg0 key0 | ||
388 | * ... | ||
389 | * regN lockregN keyN>; | ||
390 | * | ||
391 | * reg is the offset of the locked register | ||
392 | * lockreg is the offset of the lock register | ||
393 | * key is the unlock key written to lockreg | ||
394 | * | ||
395 | */ | ||
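/*
 * Illustrative fragment (offsets and key are made up): with
 *
 *	ti,dscr-locked-regs = <0x40 0x38 0x0000beef>;
 *
 * a write to the register at offset 0x40 is preceded by writing the key
 * 0x0000beef to the lock register at offset 0x38.
 */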
396 | static void __init dscr_parse_locked_regs(struct device_node *node, | ||
397 | void __iomem *base) | ||
398 | { | ||
399 | struct locked_reg *r; | ||
400 | const __be32 *p; | ||
401 | int i, size; | ||
402 | |||
403 | p = of_get_property(node, "ti,dscr-locked-regs", &size); | ||
404 | if (p) { | ||
405 | /* parse all the register descriptions we can handle */ | ||
406 | size /= (sizeof(*p) * 3); | ||
407 | if (size > MAX_LOCKED_REGS) | ||
408 | size = MAX_LOCKED_REGS; | ||
409 | |||
410 | for (i = 0; i < size; i++) { | ||
411 | r = &dscr.locked[i]; | ||
412 | |||
413 | r->reg = be32_to_cpup(p++); | ||
414 | r->lockreg = be32_to_cpup(p++); | ||
415 | r->key = be32_to_cpup(p++); | ||
416 | } | ||
417 | } | ||
418 | } | ||
419 | |||
420 | /* | ||
421 | * SoCs may have DSCR registers which are only write enabled after | ||
422 | * writing specific key values to two registers. The two key registers | ||
423 | * and the key values can be parsed from a "ti,dscr-kick-regs" | ||
424 | * property with the following layout: | ||
425 | * | ||
426 | * ti,dscr-kick-regs = <kickreg0 key0 kickreg1 key1> | ||
427 | * | ||
428 | * kickreg is the offset of the "kick" register | ||
429 | * key is the value which unlocks writing for protected regs | ||
430 | */ | ||
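/*
 * Illustrative fragment: the register offsets are made up, but the key
 * values shown are the KICK0/KICK1 unlock codes commonly used on TI
 * parts:
 *
 *	ti,dscr-kick-regs = <0x38 0x83e70b13
 *			     0x3c 0x95a4f1e0>;
 */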
431 | static void __init dscr_parse_kick_regs(struct device_node *node, | ||
432 | void __iomem *base) | ||
433 | { | ||
434 | u32 vals[4]; | ||
435 | int err; | ||
436 | |||
437 | err = of_property_read_u32_array(node, "ti,dscr-kick-regs", vals, 4); | ||
438 | if (!err) { | ||
439 | dscr.kick_reg[0] = vals[0]; | ||
440 | dscr.kick_key[0] = vals[1]; | ||
441 | dscr.kick_reg[1] = vals[2]; | ||
442 | dscr.kick_key[1] = vals[3]; | ||
443 | } | ||
444 | } | ||
445 | |||
446 | |||
447 | /* | ||
448 | * SoCs may provide controls to enable/disable individual IP blocks. These | ||
449 | * controls in the DSCR usually control pin drivers but also may control | ||
450 | * clocking and/or resets. The device tree is used to describe the bitfields | ||
451 | * in registers used to control device state. The number of bits and their | ||
452 | * values may vary even within the same register. | ||
453 | * | ||
454 | * The layout of these bitfields is described by the ti,dscr-devstate-ctl-regs | ||
455 | * property. This property is a list where each element describes a contiguous | ||
456 | * range of control fields with like properties. Each element of the list | ||
457 | * consists of 7 cells with the following values: | ||
458 | * | ||
459 | * start_id num_ids reg enable disable start_bit nbits | ||
460 | * | ||
461 | * start_id is device id for the first device control in the range | ||
462 | * num_ids is the number of device controls in the range | ||
463 | * reg is the offset of the register holding the control bits | ||
464 | * enable is the value to enable a device | ||
465 | * disable is the value to disable a device (0xffffffff if cannot disable) | ||
466 | * start_bit is the bit number of the first bit in the range | ||
467 | * nbits is the number of bits per device control | ||
468 | */ | ||
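/*
 * Illustrative entry (values made up):
 *
 *	ti,dscr-devstate-ctl-regs = <0 4 0x40 1 0 0 2>;
 *
 * describes device ids 0-3 controlled by 2-bit fields in the register at
 * offset 0x40, starting at bit 0, where writing 1 enables and 0 disables
 * a device. A disable value of 0xffffffff would instead mark the whole
 * range as enable-only.
 */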
469 | static void __init dscr_parse_devstate_ctl_regs(struct device_node *node, | ||
470 | void __iomem *base) | ||
471 | { | ||
472 | struct devstate_ctl_reg *r; | ||
473 | const __be32 *p; | ||
474 | int i, j, size; | ||
475 | |||
476 | p = of_get_property(node, "ti,dscr-devstate-ctl-regs", &size); | ||
477 | if (p) { | ||
478 | /* parse all the ranges we can handle */ | ||
479 | size /= (sizeof(*p) * 7); | ||
480 | if (size > MAX_DEVCTL_REGS) | ||
481 | size = MAX_DEVCTL_REGS; | ||
482 | |||
483 | for (i = 0; i < size; i++) { | ||
484 | r = &dscr.devctl[i]; | ||
485 | |||
486 | r->start_id = be32_to_cpup(p++); | ||
487 | r->num_ids = be32_to_cpup(p++); | ||
488 | r->reg = be32_to_cpup(p++); | ||
489 | r->enable = be32_to_cpup(p++); | ||
490 | r->disable = be32_to_cpup(p++); | ||
491 | if (r->disable == 0xffffffff) | ||
492 | r->enable_only = 1; | ||
493 | r->shift = be32_to_cpup(p++); | ||
494 | r->nbits = be32_to_cpup(p++); | ||
495 | |||
496 | for (j = r->start_id; | ||
497 | j < (r->start_id + r->num_ids); | ||
498 | j++) | ||
499 | dscr.devstate_info[j].ctl = r; | ||
500 | } | ||
501 | } | ||
502 | } | ||
503 | |||
504 | /* | ||
505 | * SoCs may provide status registers indicating the state (enabled/disabled) of | ||
506 | * devices on the SoC. The device tree is used to describe the bitfields in | ||
507 | * registers used to provide device status. The number of bits and their | ||
508 | * values used to provide status may vary even within the same register. | ||
509 | * | ||
510 | * The layout of these bitfields is described by the ti,dscr-devstate-stat-regs | ||
511 | * property. This property is a list where each element describes a contiguous | ||
512 | * range of status fields with like properties. Each element of the list | ||
513 | * consists of 7 cells with the following values: | ||
514 | * | ||
515 | * start_id num_ids reg enable disable start_bit nbits | ||
516 | * | ||
517 | * start_id is device id for the first device status in the range | ||
518 | * num_ids is the number of devices covered by the range | ||
519 | * reg is the offset of the register holding the status bits | ||
520 | * enable is the value indicating device is enabled | ||
521 | * disable is the value indicating device is disabled | ||
522 | * start_bit is the bit number of the first bit in the range | ||
523 | * nbits is the number of bits per device status | ||
524 | */ | ||
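/*
 * Illustrative entry (values made up), matching the control range
 * sketched above: status for device ids 0-3 is read from 3-bit fields in
 * the register at offset 0x44, where 2 means enabled and 0 means
 * disabled:
 *
 *	ti,dscr-devstate-stat-regs = <0 4 0x44 2 0 0 3>;
 */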
525 | static void __init dscr_parse_devstate_stat_regs(struct device_node *node, | ||
526 | void __iomem *base) | ||
527 | { | ||
528 | struct devstate_stat_reg *r; | ||
529 | const __be32 *p; | ||
530 | int i, j, size; | ||
531 | |||
532 | p = of_get_property(node, "ti,dscr-devstate-stat-regs", &size); | ||
533 | if (p) { | ||
534 | /* parse all the ranges we can handle */ | ||
535 | size /= (sizeof(*p) * 7); | ||
536 | if (size > MAX_DEVSTAT_REGS) | ||
537 | size = MAX_DEVSTAT_REGS; | ||
538 | |||
539 | for (i = 0; i < size; i++) { | ||
540 | r = &dscr.devstat[i]; | ||
541 | |||
542 | r->start_id = be32_to_cpup(p++); | ||
543 | r->num_ids = be32_to_cpup(p++); | ||
544 | r->reg = be32_to_cpup(p++); | ||
545 | r->enable = be32_to_cpup(p++); | ||
546 | r->disable = be32_to_cpup(p++); | ||
547 | r->shift = be32_to_cpup(p++); | ||
548 | r->nbits = be32_to_cpup(p++); | ||
549 | |||
550 | for (j = r->start_id; | ||
551 | j < (r->start_id + r->num_ids); | ||
552 | j++) | ||
553 | dscr.devstate_info[j].stat = r; | ||
554 | } | ||
555 | } | ||
556 | } | ||
557 | |||
558 | static struct of_device_id dscr_ids[] __initdata = { | ||
559 | { .compatible = "ti,c64x+dscr" }, | ||
560 | {} | ||
561 | }; | ||
562 | |||
563 | /* | ||
564 | * Probe for DSCR area. | ||
565 | * | ||
566 | * This has to be done early on in case the timer or interrupt controller | ||
567 | * needs it; e.g. on the C6455 SoC, the timer must be enabled through the | ||
568 | * DSCR before it is functional. | ||
569 | */ | ||
570 | void __init dscr_probe(void) | ||
571 | { | ||
572 | struct device_node *node; | ||
573 | void __iomem *base; | ||
574 | |||
575 | spin_lock_init(&dscr.lock); | ||
576 | |||
577 | node = of_find_matching_node(NULL, dscr_ids); | ||
578 | if (!node) | ||
579 | return; | ||
580 | |||
581 | base = of_iomap(node, 0); | ||
582 | if (!base) { | ||
583 | of_node_put(node); | ||
584 | return; | ||
585 | } | ||
586 | |||
587 | dscr.base = base; | ||
588 | |||
589 | dscr_parse_devstat(node, base); | ||
590 | dscr_parse_silicon_rev(node, base); | ||
591 | dscr_parse_mac_fuse(node, base); | ||
592 | dscr_parse_rmii_resets(node, base); | ||
593 | dscr_parse_locked_regs(node, base); | ||
594 | dscr_parse_kick_regs(node, base); | ||
595 | dscr_parse_devstate_ctl_regs(node, base); | ||
596 | dscr_parse_devstate_stat_regs(node, base); | ||
597 | dscr_parse_privperm(node, base); | ||
598 | } | ||
diff --git a/arch/c6x/platforms/emif.c b/arch/c6x/platforms/emif.c
new file mode 100644
index 000000000000..8b564dec241d
--- /dev/null
+++ b/arch/c6x/platforms/emif.c
@@ -0,0 +1,87 @@
1 | /* | ||
2 | * External Memory Interface | ||
3 | * | ||
4 | * Copyright (C) 2011 Texas Instruments Incorporated | ||
5 | * Author: Mark Salter <msalter@redhat.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #include <linux/of.h> | ||
12 | #include <linux/of_address.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <asm/soc.h> | ||
15 | #include <asm/dscr.h> | ||
16 | |||
17 | #define NUM_EMIFA_CHIP_ENABLES 4 | ||
18 | |||
19 | struct emifa_regs { | ||
20 | u32 midr; | ||
21 | u32 stat; | ||
22 | u32 reserved1[6]; | ||
23 | u32 bprio; | ||
24 | u32 reserved2[23]; | ||
25 | u32 cecfg[NUM_EMIFA_CHIP_ENABLES]; | ||
26 | u32 reserved3[4]; | ||
27 | u32 awcc; | ||
28 | u32 reserved4[7]; | ||
29 | u32 intraw; | ||
30 | u32 intmsk; | ||
31 | u32 intmskset; | ||
32 | u32 intmskclr; | ||
33 | }; | ||
34 | |||
35 | static struct of_device_id emifa_match[] __initdata = { | ||
36 | { .compatible = "ti,c64x+emifa" }, | ||
37 | {} | ||
38 | }; | ||
39 | |||
40 | /* | ||
41 | * Parse device tree for existence of an EMIF (External Memory Interface) | ||
42 | * and initialize it if found. | ||
43 | */ | ||
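/*
 * Illustrative node (addresses and values made up) showing the
 * properties this initcall looks for:
 *
 *	emifa@70000000 {
 *		compatible = "ti,c64x+emifa";
 *		reg = <0x70000000 0x100>;
 *		ti,dscr-dev-enable = <13>;
 *		ti,emifa-burst-priority = <255>;
 *		ti,emifa-ce-config = <0x00240120 0x00240120
 *				      0x00240122 0x00240122>;
 *	};
 */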
44 | static int __init c6x_emifa_init(void) | ||
45 | { | ||
46 | struct emifa_regs __iomem *regs; | ||
47 | struct device_node *node; | ||
48 | const __be32 *p; | ||
49 | u32 val; | ||
50 | int i, len, err; | ||
51 | |||
52 | node = of_find_matching_node(NULL, emifa_match); | ||
53 | if (!node) | ||
54 | return 0; | ||
55 | |||
56 | regs = of_iomap(node, 0); | ||
57 | if (!regs) | ||
58 | return 0; | ||
59 | |||
60 | /* look for a dscr-based enable for emifa pin buffers */ | ||
61 | err = of_property_read_u32_array(node, "ti,dscr-dev-enable", &val, 1); | ||
62 | if (!err) | ||
63 | dscr_set_devstate(val, DSCR_DEVSTATE_ENABLED); | ||
64 | |||
65 | /* set up the chip enables */ | ||
66 | p = of_get_property(node, "ti,emifa-ce-config", &len); | ||
67 | if (p) { | ||
68 | len /= sizeof(u32); | ||
69 | if (len > NUM_EMIFA_CHIP_ENABLES) | ||
70 | len = NUM_EMIFA_CHIP_ENABLES; | ||
71 | for (i = 0; i < len; i++) | ||
72 | soc_writel(be32_to_cpup(&p[i]), ®s->cecfg[i]); | ||
73 | } | ||
74 | |||
75 | err = of_property_read_u32_array(node, "ti,emifa-burst-priority", &val, 1); | ||
76 | if (!err) | ||
77 | soc_writel(val, ®s->bprio); | ||
78 | |||
79 | err = of_property_read_u32_array(node, "ti,emifa-async-wait-control", &val, 1); | ||
80 | if (!err) | ||
81 | soc_writel(val, ®s->awcc); | ||
82 | |||
83 | iounmap(regs); | ||
84 | of_node_put(node); | ||
85 | return 0; | ||
86 | } | ||
87 | pure_initcall(c6x_emifa_init); | ||
diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c
new file mode 100644
index 000000000000..7c37a947fb1c
--- /dev/null
+++ b/arch/c6x/platforms/megamod-pic.c
@@ -0,0 +1,349 @@
1 | /* | ||
2 | * Support for C64x+ Megamodule Interrupt Controller | ||
3 | * | ||
4 | * Copyright (C) 2010, 2011 Texas Instruments Incorporated | ||
5 | * Contributed by: Mark Salter <msalter@redhat.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/of.h> | ||
15 | #include <linux/of_irq.h> | ||
16 | #include <linux/of_address.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <asm/soc.h> | ||
19 | #include <asm/megamod-pic.h> | ||
20 | |||
21 | #define NR_COMBINERS 4 | ||
22 | #define NR_MUX_OUTPUTS 12 | ||
23 | |||
24 | #define IRQ_UNMAPPED 0xffff | ||
25 | |||
26 | /* | ||
27 | * Megamodule Interrupt Controller register layout | ||
28 | */ | ||
29 | struct megamod_regs { | ||
30 | u32 evtflag[8]; | ||
31 | u32 evtset[8]; | ||
32 | u32 evtclr[8]; | ||
33 | u32 reserved0[8]; | ||
34 | u32 evtmask[8]; | ||
35 | u32 mevtflag[8]; | ||
36 | u32 expmask[8]; | ||
37 | u32 mexpflag[8]; | ||
38 | u32 intmux_unused; | ||
39 | u32 intmux[7]; | ||
40 | u32 reserved1[8]; | ||
41 | u32 aegmux[2]; | ||
42 | u32 reserved2[14]; | ||
43 | u32 intxstat; | ||
44 | u32 intxclr; | ||
45 | u32 intdmask; | ||
46 | u32 reserved3[13]; | ||
47 | u32 evtasrt; | ||
48 | }; | ||
49 | |||
50 | struct megamod_pic { | ||
51 | struct irq_host *irqhost; | ||
52 | struct megamod_regs __iomem *regs; | ||
53 | raw_spinlock_t lock; | ||
54 | |||
55 | /* hw mux mapping */ | ||
56 | unsigned int output_to_irq[NR_MUX_OUTPUTS]; | ||
57 | }; | ||
58 | |||
59 | static struct megamod_pic *mm_pic; | ||
60 | |||
61 | struct megamod_cascade_data { | ||
62 | struct megamod_pic *pic; | ||
63 | int index; | ||
64 | }; | ||
65 | |||
66 | static struct megamod_cascade_data cascade_data[NR_COMBINERS]; | ||
67 | |||
68 | static void mask_megamod(struct irq_data *data) | ||
69 | { | ||
70 | struct megamod_pic *pic = irq_data_get_irq_chip_data(data); | ||
71 | irq_hw_number_t src = irqd_to_hwirq(data); | ||
72 | u32 __iomem *evtmask = &pic->regs->evtmask[src / 32]; | ||
73 | |||
74 | raw_spin_lock(&pic->lock); | ||
75 | soc_writel(soc_readl(evtmask) | (1 << (src & 31)), evtmask); | ||
76 | raw_spin_unlock(&pic->lock); | ||
77 | } | ||
78 | |||
79 | static void unmask_megamod(struct irq_data *data) | ||
80 | { | ||
81 | struct megamod_pic *pic = irq_data_get_irq_chip_data(data); | ||
82 | irq_hw_number_t src = irqd_to_hwirq(data); | ||
83 | u32 __iomem *evtmask = &pic->regs->evtmask[src / 32]; | ||
84 | |||
85 | raw_spin_lock(&pic->lock); | ||
86 | soc_writel(soc_readl(evtmask) & ~(1 << (src & 31)), evtmask); | ||
87 | raw_spin_unlock(&pic->lock); | ||
88 | } | ||
89 | |||
90 | static struct irq_chip megamod_chip = { | ||
91 | .name = "megamod", | ||
92 | .irq_mask = mask_megamod, | ||
93 | .irq_unmask = unmask_megamod, | ||
94 | }; | ||
95 | |||
96 | static void megamod_irq_cascade(unsigned int irq, struct irq_desc *desc) | ||
97 | { | ||
98 | struct megamod_cascade_data *cascade; | ||
99 | struct megamod_pic *pic; | ||
100 | u32 events; | ||
101 | int n, idx; | ||
102 | |||
103 | cascade = irq_desc_get_handler_data(desc); | ||
104 | |||
105 | pic = cascade->pic; | ||
106 | idx = cascade->index; | ||
107 | |||
108 | while ((events = soc_readl(&pic->regs->mevtflag[idx])) != 0) { | ||
109 | n = __ffs(events); | ||
110 | |||
111 | irq = irq_linear_revmap(pic->irqhost, idx * 32 + n); | ||
112 | |||
113 | soc_writel(1 << n, &pic->regs->evtclr[idx]); | ||
114 | |||
115 | generic_handle_irq(irq); | ||
116 | } | ||
117 | } | ||
118 | |||
119 | static int megamod_map(struct irq_host *h, unsigned int virq, | ||
120 | irq_hw_number_t hw) | ||
121 | { | ||
122 | struct megamod_pic *pic = h->host_data; | ||
123 | int i; | ||
124 | |||
125 | /* We shouldn't see a hwirq which is muxed to core controller */ | ||
126 | for (i = 0; i < NR_MUX_OUTPUTS; i++) | ||
127 | if (pic->output_to_irq[i] == hw) | ||
128 | return -1; | ||
129 | |||
130 | irq_set_chip_data(virq, pic); | ||
131 | irq_set_chip_and_handler(virq, &megamod_chip, handle_level_irq); | ||
132 | |||
133 | /* Set default irq type */ | ||
134 | irq_set_irq_type(virq, IRQ_TYPE_NONE); | ||
135 | |||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | static int megamod_xlate(struct irq_host *h, struct device_node *ct, | ||
140 | const u32 *intspec, unsigned int intsize, | ||
141 | irq_hw_number_t *out_hwirq, unsigned int *out_type) | ||
142 | |||
143 | { | ||
144 | /* megamod intspecs must have 1 cell */ | ||
145 | BUG_ON(intsize != 1); | ||
146 | *out_hwirq = intspec[0]; | ||
147 | *out_type = IRQ_TYPE_NONE; | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static struct irq_host_ops megamod_host_ops = { | ||
152 | .map = megamod_map, | ||
153 | .xlate = megamod_xlate, | ||
154 | }; | ||
155 | |||
156 | static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output) | ||
157 | { | ||
158 | int index, offset; | ||
159 | u32 val; | ||
160 | |||
161 | if (src < 0 || src >= (NR_COMBINERS * 32)) { | ||
162 | pic->output_to_irq[output] = IRQ_UNMAPPED; | ||
163 | return; | ||
164 | } | ||
165 | |||
166 | /* four mappings per mux register */ | ||
167 | index = output / 4; | ||
168 | offset = (output & 3) * 8; | ||
169 | |||
170 | val = soc_readl(&pic->regs->intmux[index]); | ||
171 | val &= ~(0xff << offset); | ||
172 | val |= src << offset; | ||
173 | soc_writel(val, &pic->regs->intmux[index]); | ||
174 | } | ||
175 | |||
176 | /* | ||
177 | * Parse the MUX mapping, if one exists. | ||
178 | * | ||
179 | * The MUX map is an array of up to 12 cells; one for each usable core priority | ||
180 | * interrupt. The value of a given cell is the megamodule interrupt source | ||
181 | * which is to be MUXed to the output corresponding to the cell position | ||
182 | * within the array. The first cell in the array corresponds to priority | ||
183 | * 4 and the last (12th) cell corresponds to priority 15. The allowed | ||
184 | * values are 4 - ((NR_COMBINERS * 32) - 1). Note that the combined interrupt | ||
185 | * sources (0 - 3) are not allowed to be mapped through this property. They | ||
186 | * are handled through the "interrupts" property. This allows us to use a | ||
187 | * value of zero as a "do not map" placeholder. | ||
188 | */ | ||
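/*
 * Illustrative property (event numbers made up): routing megamodule
 * event 4 to core priority 4 and event 99 to priority 8, leaving the
 * other outputs unmapped:
 *
 *	ti,c64x+megamod-pic-mux = < 4 0 0 0
 *				   99 0 0 0
 *				    0 0 0 0 >;
 */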
189 | static void __init parse_priority_map(struct megamod_pic *pic, | ||
190 | int *mapping, int size) | ||
191 | { | ||
192 | struct device_node *np = pic->irqhost->of_node; | ||
193 | const __be32 *map; | ||
194 | int i, maplen; | ||
195 | u32 val; | ||
196 | |||
197 | map = of_get_property(np, "ti,c64x+megamod-pic-mux", &maplen); | ||
198 | if (map) { | ||
199 | maplen /= 4; | ||
200 | if (maplen > size) | ||
201 | maplen = size; | ||
202 | |||
203 | for (i = 0; i < maplen; i++) { | ||
204 | val = be32_to_cpup(map); | ||
205 | if (val && val >= 4) | ||
206 | mapping[i] = val; | ||
207 | ++map; | ||
208 | } | ||
209 | } | ||
210 | } | ||
211 | |||
212 | static struct megamod_pic * __init init_megamod_pic(struct device_node *np) | ||
213 | { | ||
214 | struct megamod_pic *pic; | ||
215 | int i, irq; | ||
216 | int mapping[NR_MUX_OUTPUTS]; | ||
217 | |||
218 | pr_info("Initializing C64x+ Megamodule PIC\n"); | ||
219 | |||
220 | pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL); | ||
221 | if (!pic) { | ||
222 | pr_err("%s: Could not alloc PIC structure.\n", np->full_name); | ||
223 | return NULL; | ||
224 | } | ||
225 | |||
226 | pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, | ||
227 | NR_COMBINERS * 32, &megamod_host_ops, | ||
228 | IRQ_UNMAPPED); | ||
229 | if (!pic->irqhost) { | ||
230 | pr_err("%s: Could not alloc host.\n", np->full_name); | ||
231 | goto error_free; | ||
232 | } | ||
233 | |||
234 | pic->irqhost->host_data = pic; | ||
235 | |||
236 | raw_spin_lock_init(&pic->lock); | ||
237 | |||
238 | pic->regs = of_iomap(np, 0); | ||
239 | if (!pic->regs) { | ||
240 | pr_err("%s: Could not map registers.\n", np->full_name); | ||
241 | goto error_free; | ||
242 | } | ||
243 | |||
244 | /* Initialize MUX map */ | ||
245 | for (i = 0; i < ARRAY_SIZE(mapping); i++) | ||
246 | mapping[i] = IRQ_UNMAPPED; | ||
247 | |||
248 | parse_priority_map(pic, mapping, ARRAY_SIZE(mapping)); | ||
249 | |||
250 | /* | ||
251 | * We can have up to 12 interrupts cascading to the core controller. | ||
252 | * These cascades can be from the combined interrupt sources or for | ||
253 | * individual interrupt sources. The "interrupts" property only | ||
254 | * deals with the cascaded combined interrupts. The individual | ||
255 | * interrupts muxed to the core controller use the core controller | ||
256 | * as their interrupt parent. | ||
257 | */ | ||
258 | for (i = 0; i < NR_COMBINERS; i++) { | ||
259 | |||
260 | irq = irq_of_parse_and_map(np, i); | ||
261 | if (irq == NO_IRQ) | ||
262 | continue; | ||
263 | |||
264 | /* | ||
265 | * We count on the core priority interrupts (4 - 15) being | ||
266 | * direct mapped. Check that device tree provided something | ||
267 | * in that range. | ||
268 | */ | ||
269 | if (irq < 4 || irq >= NR_PRIORITY_IRQS) { | ||
270 | pr_err("%s: combiner-%d virq %d out of range!\n", | ||
271 | np->full_name, i, irq); | ||
272 | continue; | ||
273 | } | ||
274 | |||
275 | /* record the mapping */ | ||
276 | mapping[irq - 4] = i; | ||
277 | |||
278 | pr_debug("%s: combiner-%d cascading to virq %d\n", | ||
279 | np->full_name, i, irq); | ||
280 | |||
281 | cascade_data[i].pic = pic; | ||
282 | cascade_data[i].index = i; | ||
283 | |||
284 | /* mask and clear all events in combiner */ | ||
285 | soc_writel(~0, &pic->regs->evtmask[i]); | ||
286 | soc_writel(~0, &pic->regs->evtclr[i]); | ||
287 | |||
288 | irq_set_handler_data(irq, &cascade_data[i]); | ||
289 | irq_set_chained_handler(irq, megamod_irq_cascade); | ||
290 | } | ||
291 | |||
292 | /* Finally, set up the MUX registers */ | ||
293 | for (i = 0; i < NR_MUX_OUTPUTS; i++) { | ||
294 | if (mapping[i] != IRQ_UNMAPPED) { | ||
295 | pr_debug("%s: setting mux %d to priority %d\n", | ||
296 | np->full_name, mapping[i], i + 4); | ||
297 | set_megamod_mux(pic, mapping[i], i); | ||
298 | } | ||
299 | } | ||
300 | |||
301 | return pic; | ||
302 | |||
303 | error_free: | ||
304 | kfree(pic); | ||
305 | |||
306 | return NULL; | ||
307 | } | ||
308 | |||
309 | /* | ||
310 | * Return next active event after ACK'ing it. | ||
311 | * Return -1 if no events active. | ||
312 | */ | ||
313 | static int get_exception(void) | ||
314 | { | ||
315 | int i, bit; | ||
316 | u32 mask; | ||
317 | |||
318 | for (i = 0; i < NR_COMBINERS; i++) { | ||
319 | mask = soc_readl(&mm_pic->regs->mexpflag[i]); | ||
320 | if (mask) { | ||
321 | bit = __ffs(mask); | ||
322 | soc_writel(1 << bit, &mm_pic->regs->evtclr[i]); | ||
323 | return (i * 32) + bit; | ||
324 | } | ||
325 | } | ||
326 | return -1; | ||
327 | } | ||
328 | |||
329 | static void assert_event(unsigned int val) | ||
330 | { | ||
331 | soc_writel(val, &mm_pic->regs->evtasrt); | ||
332 | } | ||
333 | |||
334 | void __init megamod_pic_init(void) | ||
335 | { | ||
336 | struct device_node *np; | ||
337 | |||
338 | np = of_find_compatible_node(NULL, NULL, "ti,c64x+megamod-pic"); | ||
339 | if (!np) | ||
340 | return; | ||
341 | |||
342 | mm_pic = init_megamod_pic(np); | ||
343 | of_node_put(np); | ||
344 | |||
345 | soc_ops.get_exception = get_exception; | ||
346 | soc_ops.assert_event = assert_event; | ||
347 | |||
348 | return; | ||
349 | } | ||
diff --git a/arch/c6x/platforms/platform.c b/arch/c6x/platforms/platform.c
new file mode 100644
index 000000000000..26c1a355d600
--- /dev/null
+++ b/arch/c6x/platforms/platform.c
@@ -0,0 +1,17 @@
1 | /* | ||
2 | * Copyright 2011 Texas Instruments Incorporated | ||
3 | * | ||
4 | * This file is licensed under the terms of the GNU General Public License | ||
5 | * version 2. This program is licensed "as is" without any warranty of any | ||
6 | * kind, whether express or implied. | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/of_platform.h> | ||
11 | |||
12 | static int __init c6x_device_probe(void) | ||
13 | { | ||
14 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | ||
15 | return 0; | ||
16 | } | ||
17 | core_initcall(c6x_device_probe); | ||
diff --git a/arch/c6x/platforms/pll.c b/arch/c6x/platforms/pll.c
new file mode 100644
index 000000000000..3aa898f7ce4d
--- /dev/null
+++ b/arch/c6x/platforms/pll.c
@@ -0,0 +1,444 @@
1 | /* | ||
2 | * Clock and PLL control for C64x+ devices | ||
3 | * | ||
4 | * Copyright (C) 2010, 2011 Texas Instruments. | ||
5 | * Contributed by: Mark Salter <msalter@redhat.com> | ||
6 | * | ||
7 | * Copied heavily from arm/mach-davinci/clock.c, so: | ||
8 | * | ||
9 | * Copyright (C) 2006-2007 Texas Instruments. | ||
10 | * Copyright (C) 2008-2009 Deep Root Systems, LLC | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or | ||
15 | * (at your option) any later version. | ||
16 | */ | ||
17 | |||
18 | #include <linux/module.h> | ||
19 | #include <linux/clkdev.h> | ||
20 | #include <linux/clk.h> | ||
21 | #include <linux/io.h> | ||
22 | #include <linux/err.h> | ||
23 | |||
24 | #include <asm/clock.h> | ||
25 | #include <asm/soc.h> | ||
26 | |||
27 | static LIST_HEAD(clocks); | ||
28 | static DEFINE_MUTEX(clocks_mutex); | ||
29 | static DEFINE_SPINLOCK(clockfw_lock); | ||
30 | |||
31 | static void __clk_enable(struct clk *clk) | ||
32 | { | ||
33 | if (clk->parent) | ||
34 | __clk_enable(clk->parent); | ||
35 | clk->usecount++; | ||
36 | } | ||
37 | |||
38 | static void __clk_disable(struct clk *clk) | ||
39 | { | ||
40 | if (WARN_ON(clk->usecount == 0)) | ||
41 | return; | ||
42 | --clk->usecount; | ||
43 | |||
44 | if (clk->parent) | ||
45 | __clk_disable(clk->parent); | ||
46 | } | ||
47 | |||
48 | int clk_enable(struct clk *clk) | ||
49 | { | ||
50 | unsigned long flags; | ||
51 | |||
52 | if (clk == NULL || IS_ERR(clk)) | ||
53 | return -EINVAL; | ||
54 | |||
55 | spin_lock_irqsave(&clockfw_lock, flags); | ||
56 | __clk_enable(clk); | ||
57 | spin_unlock_irqrestore(&clockfw_lock, flags); | ||
58 | |||
59 | return 0; | ||
60 | } | ||
61 | EXPORT_SYMBOL(clk_enable); | ||
62 | |||
63 | void clk_disable(struct clk *clk) | ||
64 | { | ||
65 | unsigned long flags; | ||
66 | |||
67 | if (clk == NULL || IS_ERR(clk)) | ||
68 | return; | ||
69 | |||
70 | spin_lock_irqsave(&clockfw_lock, flags); | ||
71 | __clk_disable(clk); | ||
72 | spin_unlock_irqrestore(&clockfw_lock, flags); | ||
73 | } | ||
74 | EXPORT_SYMBOL(clk_disable); | ||
75 | |||
76 | unsigned long clk_get_rate(struct clk *clk) | ||
77 | { | ||
78 | if (clk == NULL || IS_ERR(clk)) | ||
79 | return -EINVAL; | ||
80 | |||
81 | return clk->rate; | ||
82 | } | ||
83 | EXPORT_SYMBOL(clk_get_rate); | ||
84 | |||
85 | long clk_round_rate(struct clk *clk, unsigned long rate) | ||
86 | { | ||
87 | if (clk == NULL || IS_ERR(clk)) | ||
88 | return -EINVAL; | ||
89 | |||
90 | if (clk->round_rate) | ||
91 | return clk->round_rate(clk, rate); | ||
92 | |||
93 | return clk->rate; | ||
94 | } | ||
95 | EXPORT_SYMBOL(clk_round_rate); | ||
96 | |||
97 | /* Propagate rate to children */ | ||
98 | static void propagate_rate(struct clk *root) | ||
99 | { | ||
100 | struct clk *clk; | ||
101 | |||
102 | list_for_each_entry(clk, &root->children, childnode) { | ||
103 | if (clk->recalc) | ||
104 | clk->rate = clk->recalc(clk); | ||
105 | propagate_rate(clk); | ||
106 | } | ||
107 | } | ||
108 | |||
109 | int clk_set_rate(struct clk *clk, unsigned long rate) | ||
110 | { | ||
111 | unsigned long flags; | ||
112 | int ret = -EINVAL; | ||
113 | |||
114 | if (clk == NULL || IS_ERR(clk)) | ||
115 | return ret; | ||
116 | |||
117 | if (clk->set_rate) | ||
118 | ret = clk->set_rate(clk, rate); | ||
119 | |||
120 | spin_lock_irqsave(&clockfw_lock, flags); | ||
121 | if (ret == 0) { | ||
122 | if (clk->recalc) | ||
123 | clk->rate = clk->recalc(clk); | ||
124 | propagate_rate(clk); | ||
125 | } | ||
126 | spin_unlock_irqrestore(&clockfw_lock, flags); | ||
127 | |||
128 | return ret; | ||
129 | } | ||
130 | EXPORT_SYMBOL(clk_set_rate); | ||
131 | |||
132 | int clk_set_parent(struct clk *clk, struct clk *parent) | ||
133 | { | ||
134 | unsigned long flags; | ||
135 | |||
136 | if (clk == NULL || IS_ERR(clk)) | ||
137 | return -EINVAL; | ||
138 | |||
139 | /* Cannot change parent on enabled clock */ | ||
140 | if (WARN_ON(clk->usecount)) | ||
141 | return -EINVAL; | ||
142 | |||
143 | mutex_lock(&clocks_mutex); | ||
144 | clk->parent = parent; | ||
145 | list_del_init(&clk->childnode); | ||
146 | list_add(&clk->childnode, &clk->parent->children); | ||
147 | mutex_unlock(&clocks_mutex); | ||
148 | |||
149 | spin_lock_irqsave(&clockfw_lock, flags); | ||
150 | if (clk->recalc) | ||
151 | clk->rate = clk->recalc(clk); | ||
152 | propagate_rate(clk); | ||
153 | spin_unlock_irqrestore(&clockfw_lock, flags); | ||
154 | |||
155 | return 0; | ||
156 | } | ||
157 | EXPORT_SYMBOL(clk_set_parent); | ||
158 | |||
159 | int clk_register(struct clk *clk) | ||
160 | { | ||
161 | if (clk == NULL || IS_ERR(clk)) | ||
162 | return -EINVAL; | ||
163 | |||
164 | if (WARN(clk->parent && !clk->parent->rate, | ||
165 | "CLK: %s parent %s has no rate!\n", | ||
166 | clk->name, clk->parent->name)) | ||
167 | return -EINVAL; | ||
168 | |||
169 | mutex_lock(&clocks_mutex); | ||
170 | list_add_tail(&clk->node, &clocks); | ||
171 | if (clk->parent) | ||
172 | list_add_tail(&clk->childnode, &clk->parent->children); | ||
173 | mutex_unlock(&clocks_mutex); | ||
174 | |||
175 | /* If rate is already set, use it */ | ||
176 | if (clk->rate) | ||
177 | return 0; | ||
178 | |||
179 | /* Else, see if there is a way to calculate it */ | ||
180 | if (clk->recalc) | ||
181 | clk->rate = clk->recalc(clk); | ||
182 | |||
183 | /* Otherwise, default to parent rate */ | ||
184 | else if (clk->parent) | ||
185 | clk->rate = clk->parent->rate; | ||
186 | |||
187 | return 0; | ||
188 | } | ||
189 | EXPORT_SYMBOL(clk_register); | ||
190 | |||
191 | void clk_unregister(struct clk *clk) | ||
192 | { | ||
193 | if (clk == NULL || IS_ERR(clk)) | ||
194 | return; | ||
195 | |||
196 | mutex_lock(&clocks_mutex); | ||
197 | list_del(&clk->node); | ||
198 | list_del(&clk->childnode); | ||
199 | mutex_unlock(&clocks_mutex); | ||
200 | } | ||
201 | EXPORT_SYMBOL(clk_unregister); | ||
202 | |||
203 | |||
204 | static u32 pll_read(struct pll_data *pll, int reg) | ||
205 | { | ||
206 | return soc_readl(pll->base + reg); | ||
207 | } | ||
208 | |||
209 | static unsigned long clk_sysclk_recalc(struct clk *clk) | ||
210 | { | ||
211 | u32 v, plldiv = 0; | ||
212 | struct pll_data *pll; | ||
213 | unsigned long rate = clk->rate; | ||
214 | |||
215 | if (WARN_ON(!clk->parent)) | ||
216 | return rate; | ||
217 | |||
218 | rate = clk->parent->rate; | ||
219 | |||
220 | /* the parent must be a PLL */ | ||
221 | if (WARN_ON(!clk->parent->pll_data)) | ||
222 | return rate; | ||
223 | |||
224 | pll = clk->parent->pll_data; | ||
225 | |||
226 | /* If pre-PLL, source clock is before the multiplier and divider(s) */ | ||
227 | if (clk->flags & PRE_PLL) | ||
228 | rate = pll->input_rate; | ||
229 | |||
230 | if (!clk->div) { | ||
231 | pr_debug("%s: (no divider) rate = %lu KHz\n", | ||
232 | clk->name, rate / 1000); | ||
233 | return rate; | ||
234 | } | ||
235 | |||
236 | if (clk->flags & FIXED_DIV_PLL) { | ||
237 | rate /= clk->div; | ||
238 | pr_debug("%s: (fixed divide by %d) rate = %lu KHz\n", | ||
239 | clk->name, clk->div, rate / 1000); | ||
240 | return rate; | ||
241 | } | ||
242 | |||
243 | v = pll_read(pll, clk->div); | ||
244 | if (v & PLLDIV_EN) | ||
245 | plldiv = (v & PLLDIV_RATIO_MASK) + 1; | ||
246 | |||
247 | if (plldiv == 0) | ||
248 | plldiv = 1; | ||
249 | |||
250 | rate /= plldiv; | ||
251 | |||
252 | pr_debug("%s: (divide by %d) rate = %lu KHz\n", | ||
253 | clk->name, plldiv, rate / 1000); | ||
254 | |||
255 | return rate; | ||
256 | } | ||
257 | |||
258 | static unsigned long clk_leafclk_recalc(struct clk *clk) | ||
259 | { | ||
260 | if (WARN_ON(!clk->parent)) | ||
261 | return clk->rate; | ||
262 | |||
263 | pr_debug("%s: (parent %s) rate = %lu KHz\n", | ||
264 | clk->name, clk->parent->name, clk->parent->rate / 1000); | ||
265 | |||
266 | return clk->parent->rate; | ||
267 | } | ||
268 | |||
269 | static unsigned long clk_pllclk_recalc(struct clk *clk) | ||
270 | { | ||
271 | u32 ctrl, mult = 0, prediv = 0, postdiv = 0; | ||
272 | u8 bypass; | ||
273 | struct pll_data *pll = clk->pll_data; | ||
274 | unsigned long rate = clk->rate; | ||
275 | |||
276 | if (clk->flags & FIXED_RATE_PLL) | ||
277 | return rate; | ||
278 | |||
279 | ctrl = pll_read(pll, PLLCTL); | ||
280 | rate = pll->input_rate = clk->parent->rate; | ||
281 | |||
282 | if (ctrl & PLLCTL_PLLEN) | ||
283 | bypass = 0; | ||
284 | else | ||
285 | bypass = 1; | ||
286 | |||
287 | if (pll->flags & PLL_HAS_MUL) { | ||
288 | mult = pll_read(pll, PLLM); | ||
289 | mult = (mult & PLLM_PLLM_MASK) + 1; | ||
290 | } | ||
291 | if (pll->flags & PLL_HAS_PRE) { | ||
292 | prediv = pll_read(pll, PLLPRE); | ||
293 | if (prediv & PLLDIV_EN) | ||
294 | prediv = (prediv & PLLDIV_RATIO_MASK) + 1; | ||
295 | else | ||
296 | prediv = 0; | ||
297 | } | ||
298 | if (pll->flags & PLL_HAS_POST) { | ||
299 | postdiv = pll_read(pll, PLLPOST); | ||
300 | if (postdiv & PLLDIV_EN) | ||
301 | postdiv = (postdiv & PLLDIV_RATIO_MASK) + 1; | ||
302 | else | ||
303 | postdiv = 1; | ||
304 | } | ||
305 | |||
306 | if (!bypass) { | ||
307 | if (prediv) | ||
308 | rate /= prediv; | ||
309 | if (mult) | ||
310 | rate *= mult; | ||
311 | if (postdiv) | ||
312 | rate /= postdiv; | ||
313 | |||
314 | pr_debug("PLL%d: input = %luMHz, pre[%d] mul[%d] post[%d] " | ||
315 | "--> %luMHz output.\n", | ||
316 | pll->num, clk->parent->rate / 1000000, | ||
317 | prediv, mult, postdiv, rate / 1000000); | ||
318 | } else | ||
319 | pr_debug("PLL%d: input = %luMHz, bypass mode.\n", | ||
320 | pll->num, clk->parent->rate / 1000000); | ||
321 | |||
322 | return rate; | ||
323 | } | ||
324 | |||
325 | |||
326 | static void __init __init_clk(struct clk *clk) | ||
327 | { | ||
328 | INIT_LIST_HEAD(&clk->node); | ||
329 | INIT_LIST_HEAD(&clk->children); | ||
330 | INIT_LIST_HEAD(&clk->childnode); | ||
331 | |||
332 | if (!clk->recalc) { | ||
333 | |||
334 | /* Check if clock is a PLL */ | ||
335 | if (clk->pll_data) | ||
336 | clk->recalc = clk_pllclk_recalc; | ||
337 | |||
338 | /* Else, if it is a PLL-derived clock */ | ||
339 | else if (clk->flags & CLK_PLL) | ||
340 | clk->recalc = clk_sysclk_recalc; | ||
341 | |||
342 | /* Otherwise, it is a leaf clock (PSC clock) */ | ||
343 | else if (clk->parent) | ||
344 | clk->recalc = clk_leafclk_recalc; | ||
345 | } | ||
346 | } | ||
347 | |||
348 | void __init c6x_clks_init(struct clk_lookup *clocks) | ||
349 | { | ||
350 | struct clk_lookup *c; | ||
351 | struct clk *clk; | ||
352 | size_t num_clocks = 0; | ||
353 | |||
354 | for (c = clocks; c->clk; c++) { | ||
355 | clk = c->clk; | ||
356 | |||
357 | __init_clk(clk); | ||
358 | clk_register(clk); | ||
359 | num_clocks++; | ||
360 | |||
361 | /* Turn on clocks that Linux doesn't otherwise manage */ | ||
362 | if (clk->flags & ALWAYS_ENABLED) | ||
363 | clk_enable(clk); | ||
364 | } | ||
365 | |||
366 | clkdev_add_table(clocks, num_clocks); | ||
367 | } | ||
368 | |||
369 | #ifdef CONFIG_DEBUG_FS | ||
370 | |||
371 | #include <linux/debugfs.h> | ||
372 | #include <linux/seq_file.h> | ||
373 | |||
374 | #define CLKNAME_MAX 10 /* longest clock name */ | ||
375 | #define NEST_DELTA 2 | ||
376 | #define NEST_MAX 4 | ||
377 | |||
378 | static void | ||
379 | dump_clock(struct seq_file *s, unsigned nest, struct clk *parent) | ||
380 | { | ||
381 | char *state; | ||
382 | char buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX]; | ||
383 | struct clk *clk; | ||
384 | unsigned i; | ||
385 | |||
386 | if (parent->flags & CLK_PLL) | ||
387 | state = "pll"; | ||
388 | else | ||
389 | state = ""; | ||
390 | |||
391 | /* <nest spaces> name <pad to end> */ | ||
392 | memset(buf, ' ', sizeof(buf) - 1); | ||
393 | buf[sizeof(buf) - 1] = 0; | ||
394 | i = strlen(parent->name); | ||
395 | memcpy(buf + nest, parent->name, | ||
396 | min(i, (unsigned)(sizeof(buf) - 1 - nest))); | ||
397 | |||
398 | seq_printf(s, "%s users=%2d %-3s %9ld Hz\n", | ||
399 | buf, parent->usecount, state, clk_get_rate(parent)); | ||
400 | /* REVISIT show device associations too */ | ||
401 | |||
402 | /* cost is now small, but not linear... */ | ||
403 | list_for_each_entry(clk, &parent->children, childnode) { | ||
404 | dump_clock(s, nest + NEST_DELTA, clk); | ||
405 | } | ||
406 | } | ||
407 | |||
408 | static int c6x_ck_show(struct seq_file *m, void *v) | ||
409 | { | ||
410 | struct clk *clk; | ||
411 | |||
412 | /* | ||
413 | * Show clock tree; We trust nonzero usecounts equate to PSC enables... | ||
414 | */ | ||
415 | mutex_lock(&clocks_mutex); | ||
416 | list_for_each_entry(clk, &clocks, node) | ||
417 | if (!clk->parent) | ||
418 | dump_clock(m, 0, clk); | ||
419 | mutex_unlock(&clocks_mutex); | ||
420 | |||
421 | return 0; | ||
422 | } | ||
423 | |||
424 | static int c6x_ck_open(struct inode *inode, struct file *file) | ||
425 | { | ||
426 | return single_open(file, c6x_ck_show, NULL); | ||
427 | } | ||
428 | |||
429 | static const struct file_operations c6x_ck_operations = { | ||
430 | .open = c6x_ck_open, | ||
431 | .read = seq_read, | ||
432 | .llseek = seq_lseek, | ||
433 | .release = single_release, | ||
434 | }; | ||
435 | |||
436 | static int __init c6x_clk_debugfs_init(void) | ||
437 | { | ||
438 | debugfs_create_file("c6x_clocks", S_IFREG | S_IRUGO, NULL, NULL, | ||
439 | &c6x_ck_operations); | ||
440 | |||
441 | return 0; | ||
442 | } | ||
443 | device_initcall(c6x_clk_debugfs_init); | ||
444 | #endif /* CONFIG_DEBUG_FS */ | ||
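The clkdev entries registered through c6x_clks_init() expose these clocks via the standard clk API. A consumer-side sketch, assuming a device whose dev_id matches the "watchdog" CLK() entry defined in plldata.c below (error handling abbreviated, illustrative only):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static unsigned long example_wdt_rate(struct device *dev)
{
	struct clk *clk;
	unsigned long rate;

	clk = clk_get(dev, NULL);	/* matched against dev_name(dev), e.g. "watchdog" */
	if (IS_ERR(clk))
		return 0;

	clk_enable(clk);		/* bumps usecount up the parent chain */
	rate = clk_get_rate(clk);	/* rate maintained by the recalc hooks above */
	clk_disable(clk);
	clk_put(clk);

	return rate;
}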
diff --git a/arch/c6x/platforms/plldata.c b/arch/c6x/platforms/plldata.c new file mode 100644 index 000000000000..2cfd6f42968f --- /dev/null +++ b/arch/c6x/platforms/plldata.c | |||
@@ -0,0 +1,404 @@ | |||
1 | /* | ||
2 | * Port on Texas Instruments TMS320C6x architecture | ||
3 | * | ||
4 | * Copyright (C) 2011 Texas Instruments Incorporated | ||
5 | * Author: Mark Salter <msalter@redhat.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/string.h> | ||
15 | #include <linux/ioport.h> | ||
16 | #include <linux/clkdev.h> | ||
17 | #include <linux/of.h> | ||
18 | #include <linux/of_address.h> | ||
19 | |||
20 | #include <asm/clock.h> | ||
21 | #include <asm/setup.h> | ||
22 | #include <asm/irq.h> | ||
23 | |||
24 | /* | ||
25 | * Common SoC clock support. | ||
26 | */ | ||
27 | |||
28 | /* Default input for PLL1 */ | ||
29 | struct clk clkin1 = { | ||
30 | .name = "clkin1", | ||
31 | .node = LIST_HEAD_INIT(clkin1.node), | ||
32 | .children = LIST_HEAD_INIT(clkin1.children), | ||
33 | .childnode = LIST_HEAD_INIT(clkin1.childnode), | ||
34 | }; | ||
35 | |||
36 | struct pll_data c6x_soc_pll1 = { | ||
37 | .num = 1, | ||
38 | .sysclks = { | ||
39 | { | ||
40 | .name = "pll1", | ||
41 | .parent = &clkin1, | ||
42 | .pll_data = &c6x_soc_pll1, | ||
43 | .flags = CLK_PLL, | ||
44 | }, | ||
45 | { | ||
46 | .name = "pll1_sysclk1", | ||
47 | .parent = &c6x_soc_pll1.sysclks[0], | ||
48 | .flags = CLK_PLL, | ||
49 | }, | ||
50 | { | ||
51 | .name = "pll1_sysclk2", | ||
52 | .parent = &c6x_soc_pll1.sysclks[0], | ||
53 | .flags = CLK_PLL, | ||
54 | }, | ||
55 | { | ||
56 | .name = "pll1_sysclk3", | ||
57 | .parent = &c6x_soc_pll1.sysclks[0], | ||
58 | .flags = CLK_PLL, | ||
59 | }, | ||
60 | { | ||
61 | .name = "pll1_sysclk4", | ||
62 | .parent = &c6x_soc_pll1.sysclks[0], | ||
63 | .flags = CLK_PLL, | ||
64 | }, | ||
65 | { | ||
66 | .name = "pll1_sysclk5", | ||
67 | .parent = &c6x_soc_pll1.sysclks[0], | ||
68 | .flags = CLK_PLL, | ||
69 | }, | ||
70 | { | ||
71 | .name = "pll1_sysclk6", | ||
72 | .parent = &c6x_soc_pll1.sysclks[0], | ||
73 | .flags = CLK_PLL, | ||
74 | }, | ||
75 | { | ||
76 | .name = "pll1_sysclk7", | ||
77 | .parent = &c6x_soc_pll1.sysclks[0], | ||
78 | .flags = CLK_PLL, | ||
79 | }, | ||
80 | { | ||
81 | .name = "pll1_sysclk8", | ||
82 | .parent = &c6x_soc_pll1.sysclks[0], | ||
83 | .flags = CLK_PLL, | ||
84 | }, | ||
85 | { | ||
86 | .name = "pll1_sysclk9", | ||
87 | .parent = &c6x_soc_pll1.sysclks[0], | ||
88 | .flags = CLK_PLL, | ||
89 | }, | ||
90 | { | ||
91 | .name = "pll1_sysclk10", | ||
92 | .parent = &c6x_soc_pll1.sysclks[0], | ||
93 | .flags = CLK_PLL, | ||
94 | }, | ||
95 | { | ||
96 | .name = "pll1_sysclk11", | ||
97 | .parent = &c6x_soc_pll1.sysclks[0], | ||
98 | .flags = CLK_PLL, | ||
99 | }, | ||
100 | { | ||
101 | .name = "pll1_sysclk12", | ||
102 | .parent = &c6x_soc_pll1.sysclks[0], | ||
103 | .flags = CLK_PLL, | ||
104 | }, | ||
105 | { | ||
106 | .name = "pll1_sysclk13", | ||
107 | .parent = &c6x_soc_pll1.sysclks[0], | ||
108 | .flags = CLK_PLL, | ||
109 | }, | ||
110 | { | ||
111 | .name = "pll1_sysclk14", | ||
112 | .parent = &c6x_soc_pll1.sysclks[0], | ||
113 | .flags = CLK_PLL, | ||
114 | }, | ||
115 | { | ||
116 | .name = "pll1_sysclk15", | ||
117 | .parent = &c6x_soc_pll1.sysclks[0], | ||
118 | .flags = CLK_PLL, | ||
119 | }, | ||
120 | { | ||
121 | .name = "pll1_sysclk16", | ||
122 | .parent = &c6x_soc_pll1.sysclks[0], | ||
123 | .flags = CLK_PLL, | ||
124 | }, | ||
125 | }, | ||
126 | }; | ||
127 | |||
128 | /* CPU core clock */ | ||
129 | struct clk c6x_core_clk = { | ||
130 | .name = "core", | ||
131 | }; | ||
132 | |||
133 | /* miscellaneous IO clocks */ | ||
134 | struct clk c6x_i2c_clk = { | ||
135 | .name = "i2c", | ||
136 | }; | ||
137 | |||
138 | struct clk c6x_watchdog_clk = { | ||
139 | .name = "watchdog", | ||
140 | }; | ||
141 | |||
142 | struct clk c6x_mcbsp1_clk = { | ||
143 | .name = "mcbsp1", | ||
144 | }; | ||
145 | |||
146 | struct clk c6x_mcbsp2_clk = { | ||
147 | .name = "mcbsp2", | ||
148 | }; | ||
149 | |||
150 | struct clk c6x_mdio_clk = { | ||
151 | .name = "mdio", | ||
152 | }; | ||
153 | |||
154 | |||
155 | #ifdef CONFIG_SOC_TMS320C6455 | ||
156 | static struct clk_lookup c6455_clks[] = { | ||
157 | CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]), | ||
158 | CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]), | ||
159 | CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]), | ||
160 | CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]), | ||
161 | CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]), | ||
162 | CLK(NULL, "core", &c6x_core_clk), | ||
163 | CLK("i2c_davinci.1", NULL, &c6x_i2c_clk), | ||
164 | CLK("watchdog", NULL, &c6x_watchdog_clk), | ||
165 | CLK("2c81800.mdio", NULL, &c6x_mdio_clk), | ||
166 | CLK("", NULL, NULL) | ||
167 | }; | ||
168 | |||
169 | |||
170 | static void __init c6455_setup_clocks(struct device_node *node) | ||
171 | { | ||
172 | struct pll_data *pll = &c6x_soc_pll1; | ||
173 | struct clk *sysclks = pll->sysclks; | ||
174 | |||
175 | pll->flags = PLL_HAS_PRE | PLL_HAS_MUL; | ||
176 | |||
177 | sysclks[2].flags |= FIXED_DIV_PLL; | ||
178 | sysclks[2].div = 3; | ||
179 | sysclks[3].flags |= FIXED_DIV_PLL; | ||
180 | sysclks[3].div = 6; | ||
181 | sysclks[4].div = PLLDIV4; | ||
182 | sysclks[5].div = PLLDIV5; | ||
183 | |||
184 | c6x_core_clk.parent = &sysclks[0]; | ||
185 | c6x_i2c_clk.parent = &sysclks[3]; | ||
186 | c6x_watchdog_clk.parent = &sysclks[3]; | ||
187 | c6x_mdio_clk.parent = &sysclks[3]; | ||
188 | |||
189 | c6x_clks_init(c6455_clks); | ||
190 | } | ||
191 | #endif /* CONFIG_SOC_TMS320C6455 */ | ||
192 | |||
193 | #ifdef CONFIG_SOC_TMS320C6457 | ||
194 | static struct clk_lookup c6457_clks[] = { | ||
195 | CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]), | ||
196 | CLK(NULL, "pll1_sysclk1", &c6x_soc_pll1.sysclks[1]), | ||
197 | CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]), | ||
198 | CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]), | ||
199 | CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]), | ||
200 | CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]), | ||
201 | CLK(NULL, "core", &c6x_core_clk), | ||
202 | CLK("i2c_davinci.1", NULL, &c6x_i2c_clk), | ||
203 | CLK("watchdog", NULL, &c6x_watchdog_clk), | ||
204 | CLK("2c81800.mdio", NULL, &c6x_mdio_clk), | ||
205 | CLK("", NULL, NULL) | ||
206 | }; | ||
207 | |||
208 | static void __init c6457_setup_clocks(struct device_node *node) | ||
209 | { | ||
210 | struct pll_data *pll = &c6x_soc_pll1; | ||
211 | struct clk *sysclks = pll->sysclks; | ||
212 | |||
213 | pll->flags = PLL_HAS_MUL | PLL_HAS_POST; | ||
214 | |||
215 | sysclks[1].flags |= FIXED_DIV_PLL; | ||
216 | sysclks[1].div = 1; | ||
217 | sysclks[2].flags |= FIXED_DIV_PLL; | ||
218 | sysclks[2].div = 3; | ||
219 | sysclks[3].flags |= FIXED_DIV_PLL; | ||
220 | sysclks[3].div = 6; | ||
221 | sysclks[4].div = PLLDIV4; | ||
222 | sysclks[5].div = PLLDIV5; | ||
223 | |||
224 | c6x_core_clk.parent = &sysclks[1]; | ||
225 | c6x_i2c_clk.parent = &sysclks[3]; | ||
226 | c6x_watchdog_clk.parent = &sysclks[5]; | ||
227 | c6x_mdio_clk.parent = &sysclks[5]; | ||
228 | |||
229 | c6x_clks_init(c6457_clks); | ||
230 | } | ||
231 | #endif /* CONFIG_SOC_TMS320C6457 */ | ||
232 | |||
233 | #ifdef CONFIG_SOC_TMS320C6472 | ||
234 | static struct clk_lookup c6472_clks[] = { | ||
235 | CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]), | ||
236 | CLK(NULL, "pll1_sysclk1", &c6x_soc_pll1.sysclks[1]), | ||
237 | CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]), | ||
238 | CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]), | ||
239 | CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]), | ||
240 | CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]), | ||
241 | CLK(NULL, "pll1_sysclk6", &c6x_soc_pll1.sysclks[6]), | ||
242 | CLK(NULL, "pll1_sysclk7", &c6x_soc_pll1.sysclks[7]), | ||
243 | CLK(NULL, "pll1_sysclk8", &c6x_soc_pll1.sysclks[8]), | ||
244 | CLK(NULL, "pll1_sysclk9", &c6x_soc_pll1.sysclks[9]), | ||
245 | CLK(NULL, "pll1_sysclk10", &c6x_soc_pll1.sysclks[10]), | ||
246 | CLK(NULL, "core", &c6x_core_clk), | ||
247 | CLK("i2c_davinci.1", NULL, &c6x_i2c_clk), | ||
248 | CLK("watchdog", NULL, &c6x_watchdog_clk), | ||
249 | CLK("2c81800.mdio", NULL, &c6x_mdio_clk), | ||
250 | CLK("", NULL, NULL) | ||
251 | }; | ||
252 | |||
253 | /* assumptions used for delay loop calculations */ | ||
254 | #define MIN_CLKIN1_KHz 15625 | ||
255 | #define MAX_CORE_KHz 700000 | ||
256 | #define MIN_PLLOUT_KHz MIN_CLKIN1_KHz | ||
257 | |||
258 | static void __init c6472_setup_clocks(struct device_node *node) | ||
259 | { | ||
260 | struct pll_data *pll = &c6x_soc_pll1; | ||
261 | struct clk *sysclks = pll->sysclks; | ||
262 | int i; | ||
263 | |||
264 | pll->flags = PLL_HAS_MUL; | ||
265 | |||
266 | for (i = 1; i <= 6; i++) { | ||
267 | sysclks[i].flags |= FIXED_DIV_PLL; | ||
268 | sysclks[i].div = 1; | ||
269 | } | ||
270 | |||
271 | sysclks[7].flags |= FIXED_DIV_PLL; | ||
272 | sysclks[7].div = 3; | ||
273 | sysclks[8].flags |= FIXED_DIV_PLL; | ||
274 | sysclks[8].div = 6; | ||
275 | sysclks[9].flags |= FIXED_DIV_PLL; | ||
276 | sysclks[9].div = 2; | ||
277 | sysclks[10].div = PLLDIV10; | ||
278 | |||
279 | c6x_core_clk.parent = &sysclks[get_coreid() + 1]; | ||
280 | c6x_i2c_clk.parent = &sysclks[8]; | ||
281 | c6x_watchdog_clk.parent = &sysclks[8]; | ||
282 | c6x_mdio_clk.parent = &sysclks[5]; | ||
283 | |||
284 | c6x_clks_init(c6472_clks); | ||
285 | } | ||
286 | #endif /* CONFIG_SOC_TMS320C6472 */ | ||
287 | |||
288 | |||
289 | #ifdef CONFIG_SOC_TMS320C6474 | ||
290 | static struct clk_lookup c6474_clks[] = { | ||
291 | CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]), | ||
292 | CLK(NULL, "pll1_sysclk7", &c6x_soc_pll1.sysclks[7]), | ||
293 | CLK(NULL, "pll1_sysclk9", &c6x_soc_pll1.sysclks[9]), | ||
294 | CLK(NULL, "pll1_sysclk10", &c6x_soc_pll1.sysclks[10]), | ||
295 | CLK(NULL, "pll1_sysclk11", &c6x_soc_pll1.sysclks[11]), | ||
296 | CLK(NULL, "pll1_sysclk12", &c6x_soc_pll1.sysclks[12]), | ||
297 | CLK(NULL, "pll1_sysclk13", &c6x_soc_pll1.sysclks[13]), | ||
298 | CLK(NULL, "core", &c6x_core_clk), | ||
299 | CLK("i2c_davinci.1", NULL, &c6x_i2c_clk), | ||
300 | CLK("mcbsp.1", NULL, &c6x_mcbsp1_clk), | ||
301 | CLK("mcbsp.2", NULL, &c6x_mcbsp2_clk), | ||
302 | CLK("watchdog", NULL, &c6x_watchdog_clk), | ||
303 | CLK("2c81800.mdio", NULL, &c6x_mdio_clk), | ||
304 | CLK("", NULL, NULL) | ||
305 | }; | ||
306 | |||
307 | static void __init c6474_setup_clocks(struct device_node *node) | ||
308 | { | ||
309 | struct pll_data *pll = &c6x_soc_pll1; | ||
310 | struct clk *sysclks = pll->sysclks; | ||
311 | |||
312 | pll->flags = PLL_HAS_MUL; | ||
313 | |||
314 | sysclks[7].flags |= FIXED_DIV_PLL; | ||
315 | sysclks[7].div = 1; | ||
316 | sysclks[9].flags |= FIXED_DIV_PLL; | ||
317 | sysclks[9].div = 3; | ||
318 | sysclks[10].flags |= FIXED_DIV_PLL; | ||
319 | sysclks[10].div = 6; | ||
320 | |||
321 | sysclks[11].div = PLLDIV11; | ||
322 | |||
323 | sysclks[12].flags |= FIXED_DIV_PLL; | ||
324 | sysclks[12].div = 2; | ||
325 | |||
326 | sysclks[13].div = PLLDIV13; | ||
327 | |||
328 | c6x_core_clk.parent = &sysclks[7]; | ||
329 | c6x_i2c_clk.parent = &sysclks[10]; | ||
330 | c6x_watchdog_clk.parent = &sysclks[10]; | ||
331 | c6x_mcbsp1_clk.parent = &sysclks[10]; | ||
332 | c6x_mcbsp2_clk.parent = &sysclks[10]; | ||
333 | |||
334 | c6x_clks_init(c6474_clks); | ||
335 | } | ||
336 | #endif /* CONFIG_SOC_TMS320C6474 */ | ||
337 | |||
338 | static struct of_device_id c6x_clkc_match[] __initdata = { | ||
339 | #ifdef CONFIG_SOC_TMS320C6455 | ||
340 | { .compatible = "ti,c6455-pll", .data = c6455_setup_clocks }, | ||
341 | #endif | ||
342 | #ifdef CONFIG_SOC_TMS320C6457 | ||
343 | { .compatible = "ti,c6457-pll", .data = c6457_setup_clocks }, | ||
344 | #endif | ||
345 | #ifdef CONFIG_SOC_TMS320C6472 | ||
346 | { .compatible = "ti,c6472-pll", .data = c6472_setup_clocks }, | ||
347 | #endif | ||
348 | #ifdef CONFIG_SOC_TMS320C6474 | ||
349 | { .compatible = "ti,c6474-pll", .data = c6474_setup_clocks }, | ||
350 | #endif | ||
351 | { .compatible = "ti,c64x+pll" }, | ||
352 | {} | ||
353 | }; | ||
354 | |||
355 | void __init c64x_setup_clocks(void) | ||
356 | { | ||
357 | void (*__setup_clocks)(struct device_node *np); | ||
358 | struct pll_data *pll = &c6x_soc_pll1; | ||
359 | struct device_node *node; | ||
360 | const struct of_device_id *id; | ||
361 | int err; | ||
362 | u32 val; | ||
363 | |||
364 | node = of_find_matching_node(NULL, c6x_clkc_match); | ||
365 | if (!node) | ||
366 | return; | ||
367 | |||
368 | pll->base = of_iomap(node, 0); | ||
369 | if (!pll->base) | ||
370 | goto out; | ||
371 | |||
372 | err = of_property_read_u32(node, "clock-frequency", &val); | ||
373 | if (err || val == 0) { | ||
374 | pr_err("%s: no clock-frequency found! Using %dMHz\n", | ||
375 | node->full_name, (int)val / 1000000); | ||
376 | val = 25000000; | ||
377 | } | ||
378 | clkin1.rate = val; | ||
379 | |||
380 | err = of_property_read_u32(node, "ti,c64x+pll-bypass-delay", &val); | ||
381 | if (err) | ||
382 | val = 5000; | ||
383 | pll->bypass_delay = val; | ||
384 | |||
385 | err = of_property_read_u32(node, "ti,c64x+pll-reset-delay", &val); | ||
386 | if (err) | ||
387 | val = 30000; | ||
388 | pll->reset_delay = val; | ||
389 | |||
390 | err = of_property_read_u32(node, "ti,c64x+pll-lock-delay", &val); | ||
391 | if (err) | ||
392 | val = 30000; | ||
393 | pll->lock_delay = val; | ||
394 | |||
395 | /* id->data is a pointer to SoC-specific setup */ | ||
396 | id = of_match_node(c6x_clkc_match, node); | ||
397 | if (id && id->data) { | ||
398 | __setup_clocks = id->data; | ||
399 | __setup_clocks(node); | ||
400 | } | ||
401 | |||
402 | out: | ||
403 | of_node_put(node); | ||
404 | } | ||
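The recalc hooks in pll.c reduce to simple integer arithmetic: the PLL output is input / prediv * mult / postdiv, and each sysclk divides that again by its fixed or register-programmed divider. A standalone worked example with made-up values (25 MHz clkin1, x28 multiplier, fixed /6 sysclk such as pll1_sysclk3 on C6455); the numbers are assumptions, not taken from any real board:

#include <stdio.h>

int main(void)
{
	unsigned long input = 25000000;			/* clkin1 reference, assumed 25 MHz */
	unsigned long prediv = 1, mult = 28, postdiv = 1;	/* PLLPRE/PLLM/PLLPOST ratios, assumed */
	unsigned long plldiv = 6;			/* fixed divider, e.g. pll1_sysclk3 */
	unsigned long pllout, sysclk;

	pllout = input / prediv * mult / postdiv;	/* 700000000 Hz */
	sysclk = pllout / plldiv;			/* 116666666 Hz */

	printf("pllout = %lu Hz, sysclk = %lu Hz\n", pllout, sysclk);
	return 0;
}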
diff --git a/arch/c6x/platforms/timer64.c b/arch/c6x/platforms/timer64.c new file mode 100644 index 000000000000..03c03c249191 --- /dev/null +++ b/arch/c6x/platforms/timer64.c | |||
@@ -0,0 +1,244 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010, 2011 Texas Instruments Incorporated | ||
3 | * Contributed by: Mark Salter (msalter@redhat.com) | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #include <linux/clockchips.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/io.h> | ||
13 | #include <linux/of.h> | ||
14 | #include <linux/of_irq.h> | ||
15 | #include <linux/of_address.h> | ||
16 | #include <asm/soc.h> | ||
17 | #include <asm/dscr.h> | ||
18 | #include <asm/timer64.h> | ||
19 | |||
20 | struct timer_regs { | ||
21 | u32 reserved0; | ||
22 | u32 emumgt; | ||
23 | u32 reserved1; | ||
24 | u32 reserved2; | ||
25 | u32 cntlo; | ||
26 | u32 cnthi; | ||
27 | u32 prdlo; | ||
28 | u32 prdhi; | ||
29 | u32 tcr; | ||
30 | u32 tgcr; | ||
31 | u32 wdtcr; | ||
32 | }; | ||
33 | |||
34 | static struct timer_regs __iomem *timer; | ||
35 | |||
36 | #define TCR_TSTATLO 0x001 | ||
37 | #define TCR_INVOUTPLO 0x002 | ||
38 | #define TCR_INVINPLO 0x004 | ||
39 | #define TCR_CPLO 0x008 | ||
40 | #define TCR_ENAMODELO_ONCE 0x040 | ||
41 | #define TCR_ENAMODELO_CONT 0x080 | ||
42 | #define TCR_ENAMODELO_MASK 0x0c0 | ||
43 | #define TCR_PWIDLO_MASK 0x030 | ||
44 | #define TCR_CLKSRCLO 0x100 | ||
45 | #define TCR_TIENLO 0x200 | ||
46 | #define TCR_TSTATHI (0x001 << 16) | ||
47 | #define TCR_INVOUTPHI (0x002 << 16) | ||
48 | #define TCR_CPHI (0x008 << 16) | ||
49 | #define TCR_PWIDHI_MASK (0x030 << 16) | ||
50 | #define TCR_ENAMODEHI_ONCE (0x040 << 16) | ||
51 | #define TCR_ENAMODEHI_CONT (0x080 << 16) | ||
52 | #define TCR_ENAMODEHI_MASK (0x0c0 << 16) | ||
53 | |||
54 | #define TGCR_TIMLORS 0x001 | ||
55 | #define TGCR_TIMHIRS 0x002 | ||
56 | #define TGCR_TIMMODE_UD32 0x004 | ||
57 | #define TGCR_TIMMODE_WDT64 0x008 | ||
58 | #define TGCR_TIMMODE_CD32 0x00c | ||
59 | #define TGCR_TIMMODE_MASK 0x00c | ||
60 | #define TGCR_PSCHI_MASK (0x00f << 8) | ||
61 | #define TGCR_TDDRHI_MASK (0x00f << 12) | ||
62 | |||
63 | /* | ||
64 | * Timer clocks are divided down from the CPU clock | ||
65 | * The divisor is in the EMUMGTCLKSPD register | ||
66 | */ | ||
67 | #define TIMER_DIVISOR \ | ||
68 | ((soc_readl(&timer->emumgt) & (0xf << 16)) >> 16) | ||
69 | |||
70 | #define TIMER64_RATE (c6x_core_freq / TIMER_DIVISOR) | ||
71 | |||
72 | #define TIMER64_MODE_DISABLED 0 | ||
73 | #define TIMER64_MODE_ONE_SHOT TCR_ENAMODELO_ONCE | ||
74 | #define TIMER64_MODE_PERIODIC TCR_ENAMODELO_CONT | ||
75 | |||
76 | static int timer64_mode; | ||
77 | static int timer64_devstate_id = -1; | ||
78 | |||
79 | static void timer64_config(unsigned long period) | ||
80 | { | ||
81 | u32 tcr = soc_readl(&timer->tcr) & ~TCR_ENAMODELO_MASK; | ||
82 | |||
83 | soc_writel(tcr, &timer->tcr); | ||
84 | soc_writel(period - 1, &timer->prdlo); | ||
85 | soc_writel(0, &timer->cntlo); | ||
86 | tcr |= timer64_mode; | ||
87 | soc_writel(tcr, &timer->tcr); | ||
88 | } | ||
89 | |||
90 | static void timer64_enable(void) | ||
91 | { | ||
92 | u32 val; | ||
93 | |||
94 | if (timer64_devstate_id >= 0) | ||
95 | dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_ENABLED); | ||
96 | |||
97 | /* disable timer, reset count */ | ||
98 | soc_writel(soc_readl(&timer->tcr) & ~TCR_ENAMODELO_MASK, &timer->tcr); | ||
99 | soc_writel(0, &timer->prdlo); | ||
100 | |||
101 | /* use internal clock and 1 cycle pulse width */ | ||
102 | val = soc_readl(&timer->tcr); | ||
103 | soc_writel(val & ~(TCR_CLKSRCLO | TCR_PWIDLO_MASK), &timer->tcr); | ||
104 | |||
105 | /* dual 32-bit unchained mode */ | ||
106 | val = soc_readl(&timer->tgcr) & ~TGCR_TIMMODE_MASK; | ||
107 | soc_writel(val, &timer->tgcr); | ||
108 | soc_writel(val | (TGCR_TIMLORS | TGCR_TIMMODE_UD32), &timer->tgcr); | ||
109 | } | ||
110 | |||
111 | static void timer64_disable(void) | ||
112 | { | ||
113 | /* disable timer, reset count */ | ||
114 | soc_writel(soc_readl(&timer->tcr) & ~TCR_ENAMODELO_MASK, &timer->tcr); | ||
115 | soc_writel(0, &timer->prdlo); | ||
116 | |||
117 | if (timer64_devstate_id >= 0) | ||
118 | dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_DISABLED); | ||
119 | } | ||
120 | |||
121 | static int next_event(unsigned long delta, | ||
122 | struct clock_event_device *evt) | ||
123 | { | ||
124 | timer64_config(delta); | ||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | static void set_clock_mode(enum clock_event_mode mode, | ||
129 | struct clock_event_device *evt) | ||
130 | { | ||
131 | switch (mode) { | ||
132 | case CLOCK_EVT_MODE_PERIODIC: | ||
133 | timer64_enable(); | ||
134 | timer64_mode = TIMER64_MODE_PERIODIC; | ||
135 | timer64_config(TIMER64_RATE / HZ); | ||
136 | break; | ||
137 | case CLOCK_EVT_MODE_ONESHOT: | ||
138 | timer64_enable(); | ||
139 | timer64_mode = TIMER64_MODE_ONE_SHOT; | ||
140 | break; | ||
141 | case CLOCK_EVT_MODE_UNUSED: | ||
142 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
143 | timer64_mode = TIMER64_MODE_DISABLED; | ||
144 | timer64_disable(); | ||
145 | break; | ||
146 | case CLOCK_EVT_MODE_RESUME: | ||
147 | break; | ||
148 | } | ||
149 | } | ||
150 | |||
151 | static struct clock_event_device t64_clockevent_device = { | ||
152 | .name = "TIMER64_EVT32_TIMER", | ||
153 | .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, | ||
154 | .rating = 200, | ||
155 | .set_mode = set_clock_mode, | ||
156 | .set_next_event = next_event, | ||
157 | }; | ||
158 | |||
159 | static irqreturn_t timer_interrupt(int irq, void *dev_id) | ||
160 | { | ||
161 | struct clock_event_device *cd = &t64_clockevent_device; | ||
162 | |||
163 | cd->event_handler(cd); | ||
164 | |||
165 | return IRQ_HANDLED; | ||
166 | } | ||
167 | |||
168 | static struct irqaction timer_iact = { | ||
169 | .name = "timer", | ||
170 | .flags = IRQF_TIMER, | ||
171 | .handler = timer_interrupt, | ||
172 | .dev_id = &t64_clockevent_device, | ||
173 | }; | ||
174 | |||
175 | void __init timer64_init(void) | ||
176 | { | ||
177 | struct clock_event_device *cd = &t64_clockevent_device; | ||
178 | struct device_node *np, *first = NULL; | ||
179 | u32 val; | ||
180 | int err, found = 0; | ||
181 | |||
182 | for_each_compatible_node(np, NULL, "ti,c64x+timer64") { | ||
183 | err = of_property_read_u32(np, "ti,core-mask", &val); | ||
184 | if (!err) { | ||
185 | if (val & (1 << get_coreid())) { | ||
186 | found = 1; | ||
187 | break; | ||
188 | } | ||
189 | } else if (!first) | ||
190 | first = np; | ||
191 | } | ||
192 | if (!found) { | ||
193 | /* try first one with no core-mask */ | ||
194 | if (first) | ||
195 | np = of_node_get(first); | ||
196 | else { | ||
197 | pr_debug("Cannot find ti,c64x+timer64 timer.\n"); | ||
198 | return; | ||
199 | } | ||
200 | } | ||
201 | |||
202 | timer = of_iomap(np, 0); | ||
203 | if (!timer) { | ||
204 | pr_debug("%s: Cannot map timer registers.\n", np->full_name); | ||
205 | goto out; | ||
206 | } | ||
207 | pr_debug("%s: Timer registers=%p.\n", np->full_name, timer); | ||
208 | |||
209 | cd->irq = irq_of_parse_and_map(np, 0); | ||
210 | if (cd->irq == NO_IRQ) { | ||
211 | pr_debug("%s: Cannot find interrupt.\n", np->full_name); | ||
212 | iounmap(timer); | ||
213 | goto out; | ||
214 | } | ||
215 | |||
216 | /* If there is a device state control, save the ID. */ | ||
217 | err = of_property_read_u32(np, "ti,dscr-dev-enable", &val); | ||
218 | if (!err) { | ||
219 | timer64_devstate_id = val; | ||
220 | |||
221 | /* | ||
222 | * It is necessary to enable the timer block here because | ||
223 | * the TIMER_DIVISOR macro needs to read a timer register | ||
224 | * to get the divisor. | ||
225 | */ | ||
226 | dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_ENABLED); | ||
227 | } | ||
228 | |||
229 | pr_debug("%s: Timer irq=%d.\n", np->full_name, cd->irq); | ||
230 | |||
231 | clockevents_calc_mult_shift(cd, c6x_core_freq / TIMER_DIVISOR, 5); | ||
232 | |||
233 | cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); | ||
234 | cd->min_delta_ns = clockevent_delta2ns(250, cd); | ||
235 | |||
236 | cd->cpumask = cpumask_of(smp_processor_id()); | ||
237 | |||
238 | clockevents_register_device(cd); | ||
239 | setup_irq(cd->irq, &timer_iact); | ||
240 | |||
241 | out: | ||
242 | of_node_put(np); | ||
243 | return; | ||
244 | } | ||
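In periodic mode the clockevent above is programmed with TIMER64_RATE / HZ ticks per interrupt (timer64_config() writes period - 1 into PRDLO), where TIMER64_RATE is the core clock divided by the EMUMGTCLKSPD field. A small arithmetic sketch with assumed numbers (700 MHz core, divide-by-6, HZ=100):

#include <stdio.h>

int main(void)
{
	unsigned long core_freq = 700000000;	/* c6x_core_freq, assumed */
	unsigned long divisor = 6;		/* EMUMGTCLKSPD clock-speed field, assumed */
	unsigned long hz = 100;			/* kernel HZ, assumed */
	unsigned long timer_rate = core_freq / divisor;	/* 116666666 Hz timer clock */
	unsigned long period = timer_rate / hz;		/* 1166666 ticks per jiffy */

	printf("timer rate = %lu Hz, ticks per jiffy = %lu\n", timer_rate, period);
	return 0;
}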