author		Paul Mackerras <paulus@samba.org>	2006-03-27 03:15:26 -0500
committer	Paul Mackerras <paulus@samba.org>	2006-03-27 03:15:26 -0500
commit		9b781727fd1062671afa144b93e8c69b14bcac4d
tree		1cec35f0cedc664394b15165d96944019b8e1ff2 /arch/powerpc/kernel
parent		0eb4cb9b16aba6d610a0716503b96d299b308d44

powerpc: Move cpu_setup_6xx.S and temp.c over to arch/powerpc

Also renamed temp.c to tau_6xx.c (for thermal assist unit) and updated
the Kconfig option description and help text for CONFIG_TAU.

Signed-off-by: Paul Mackerras <paulus@samba.org>

Diffstat (limited to 'arch/powerpc/kernel')

 arch/powerpc/kernel/Makefile        |   3
 arch/powerpc/kernel/cpu_setup_6xx.S | 474
 arch/powerpc/kernel/tau_6xx.c       | 271
 3 files changed, 747 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index e7fddf1e42c7..754c227835bb 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -34,7 +34,8 @@ obj-$(CONFIG_IBMEBUS)		+= ibmebus.o
 obj-$(CONFIG_GENERIC_TBSYNC)	+= smp-tbsync.o
 obj64-$(CONFIG_PPC_MULTIPLATFORM)	+= nvram_64.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
-obj-$(CONFIG_6xx)		+= idle_6xx.o l2cr_6xx.o
+obj-$(CONFIG_6xx)		+= idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
+obj-$(CONFIG_TAU)		+= tau_6xx.o
 
 ifeq ($(CONFIG_PPC_MERGE),y)
 
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
new file mode 100644
index 000000000000..55ed7716636f
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -0,0 +1,474 @@
/*
 * This file contains low level CPU setup functions.
 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>

_GLOBAL(__setup_cpu_603)
	b	setup_common_caches
_GLOBAL(__setup_cpu_604)
	mflr	r4
	bl	setup_common_caches
	bl	setup_604_hid0
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_750)
	mflr	r4
	bl	__init_fpu_registers
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_750cx)
	mflr	r4
	bl	__init_fpu_registers
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	bl	setup_750cx
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_750fx)
	mflr	r4
	bl	__init_fpu_registers
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	bl	setup_750fx
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_7400)
	mflr	r4
	bl	__init_fpu_registers
	bl	setup_7400_workarounds
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_7410)
	mflr	r4
	bl	__init_fpu_registers
	bl	setup_7410_workarounds
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	li	r3,0
	mtspr	SPRN_L2CR2,r3
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_745x)
	mflr	r4
	bl	setup_common_caches
	bl	setup_745x_specifics
	mtlr	r4
	blr

/* Enable caches for 603's, 604, 750 & 7400 */
setup_common_caches:
	mfspr	r11,SPRN_HID0
	andi.	r0,r11,HID0_DCE
	ori	r11,r11,HID0_ICE|HID0_DCE
	ori	r8,r11,HID0_ICFI
	bne	1f			/* don't invalidate the D-cache */
	ori	r8,r8,HID0_DCI		/* unless it wasn't enabled */
1:	sync
	mtspr	SPRN_HID0,r8		/* enable and invalidate caches */
	sync
	mtspr	SPRN_HID0,r11		/* enable caches */
	sync
	isync
	blr

/* 604, 604e, 604ev, ...
 * Enable superscalar execution & branch history table
 */
setup_604_hid0:
	mfspr	r11,SPRN_HID0
	ori	r11,r11,HID0_SIED|HID0_BHTE
	ori	r8,r11,HID0_BTCD
	sync
	mtspr	SPRN_HID0,r8	/* flush branch target address cache */
	sync			/* on 604e/604r */
	mtspr	SPRN_HID0,r11
	sync
	isync
	blr

/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
 * erratas we work around here.
 * Moto MPC710CE.pdf describes them, those are errata
 * #3, #4 and #5
 * Note that we assume the firmware didn't choose to
 * apply other workarounds (there are other ones documented
 * in the .pdf). It appear that Apple firmware only works
 * around #3 and with the same fix we use. We may want to
 * check if the CPU is using 60x bus mode in which case
 * the workaround for errata #4 is useless. Also, we may
 * want to explicitely clear HID0_NOPDST as this is not
 * needed once we have applied workaround #5 (though it's
 * not set by Apple's firmware at least).
 */
setup_7400_workarounds:
	mfpvr	r3
	rlwinm	r3,r3,0,20,31
	cmpwi	0,r3,0x0207
	ble	1f
	blr
setup_7410_workarounds:
	mfpvr	r3
	rlwinm	r3,r3,0,20,31
	cmpwi	0,r3,0x0100
	bnelr
1:
	mfspr	r11,SPRN_MSSSR0
	/* Errata #3: Set L1OPQ_SIZE to 0x10 */
	rlwinm	r11,r11,0,9,6
	oris	r11,r11,0x0100
	/* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?) */
	oris	r11,r11,0x0002
	/* Errata #5: Set DRLT_SIZE to 0x01 */
	rlwinm	r11,r11,0,5,2
	oris	r11,r11,0x0800
	sync
	mtspr	SPRN_MSSSR0,r11
	sync
	isync
	blr

/* 740/750/7400/7410
 * Enable Store Gathering (SGE), Address Brodcast (ABE),
 * Branch History Table (BHTE), Branch Target ICache (BTIC)
 * Dynamic Power Management (DPM), Speculative (SPD)
 * Clear Instruction cache throttling (ICTC)
 */
setup_750_7400_hid0:
	mfspr	r11,SPRN_HID0
	ori	r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
	oris	r11,r11,HID0_DPM@h
BEGIN_FTR_SECTION
	xori	r11,r11,HID0_BTIC
END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
BEGIN_FTR_SECTION
	xoris	r11,r11,HID0_DPM@h	/* disable dynamic power mgmt */
END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
	li	r3,HID0_SPD
	andc	r11,r11,r3		/* clear SPD: enable speculative */
	li	r3,0
	mtspr	SPRN_ICTC,r3		/* Instruction Cache Throttling off */
	isync
	mtspr	SPRN_HID0,r11
	sync
	isync
	blr

/* 750cx specific
 * Looks like we have to disable NAP feature for some PLL settings...
 * (waiting for confirmation)
 */
setup_750cx:
	mfspr	r10, SPRN_HID1
	rlwinm	r10,r10,4,28,31
	cmpwi	cr0,r10,7
	cmpwi	cr1,r10,9
	cmpwi	cr2,r10,11
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr2+eq
	bnelr
	lwz	r6,CPU_SPEC_FEATURES(r5)
	li	r7,CPU_FTR_CAN_NAP
	andc	r6,r6,r7
	stw	r6,CPU_SPEC_FEATURES(r5)
	blr

/* 750fx specific
 */
setup_750fx:
	blr

/* MPC 745x
 * Enable Store Gathering (SGE), Branch Folding (FOLD)
 * Branch History Table (BHTE), Branch Target ICache (BTIC)
 * Dynamic Power Management (DPM), Speculative (SPD)
 * Ensure our data cache instructions really operate.
 * Timebase has to be running or we wouldn't have made it here,
 * just ensure we don't disable it.
 * Clear Instruction cache throttling (ICTC)
 * Enable L2 HW prefetch
 */
setup_745x_specifics:
	/* We check for the presence of an L3 cache setup by
	 * the firmware. If any, we disable NAP capability as
	 * it's known to be bogus on rev 2.1 and earlier
	 */
	mfspr	r11,SPRN_L3CR
	andis.	r11,r11,L3CR_L3E@h
	beq	1f
	lwz	r6,CPU_SPEC_FEATURES(r5)
	andi.	r0,r6,CPU_FTR_L3_DISABLE_NAP
	beq	1f
	li	r7,CPU_FTR_CAN_NAP
	andc	r6,r6,r7
	stw	r6,CPU_SPEC_FEATURES(r5)
1:
	mfspr	r11,SPRN_HID0

	/* All of the bits we have to set.....
	 */
	ori	r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE
	ori	r11,r11,HID0_LRSTK | HID0_BTIC
	oris	r11,r11,HID0_DPM@h
BEGIN_FTR_SECTION
	xori	r11,r11,HID0_BTIC
END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
BEGIN_FTR_SECTION
	xoris	r11,r11,HID0_DPM@h	/* disable dynamic power mgmt */
END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)

	/* All of the bits we have to clear....
	 */
	li	r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
	andc	r11,r11,r3		/* clear SPD: enable speculative */
	li	r3,0

	mtspr	SPRN_ICTC,r3		/* Instruction Cache Throttling off */
	isync
	mtspr	SPRN_HID0,r11
	sync
	isync

	/* Enable L2 HW prefetch, if L2 is enabled
	 */
	mfspr	r3,SPRN_L2CR
	andis.	r3,r3,L2CR_L2E@h
	beqlr
	mfspr	r3,SPRN_MSSCR0
	ori	r3,r3,3
	sync
	mtspr	SPRN_MSSCR0,r3
	sync
	isync
	blr

/*
 * Initialize the FPU registers. This is needed to work around an errata
 * in some 750 cpus where using a not yet initialized FPU register after
 * power on reset may hang the CPU
 */
_GLOBAL(__init_fpu_registers)
	mfmsr	r10
	ori	r11,r10,MSR_FP
	mtmsr	r11
	isync
	addis	r9,r3,empty_zero_page@ha
	addi	r9,r9,empty_zero_page@l
	REST_32FPRS(0,r9)
	sync
	mtmsr	r10
	isync
	blr


/* Definitions for the table use to save CPU states */
#define CS_HID0		0
#define CS_HID1		4
#define CS_HID2		8
#define CS_MSSCR0	12
#define CS_MSSSR0	16
#define CS_ICTRL	20
#define CS_LDSTCR	24
#define CS_LDSTDB	28
#define CS_SIZE		32

	.data
	.balign	L1_CACHE_BYTES
cpu_state_storage:
	.space	CS_SIZE
	.balign	L1_CACHE_BYTES,0
	.text

/* Called in normal context to backup CPU 0 state. This
 * does not include cache settings. This function is also
 * called for machine sleep. This does not include the MMU
 * setup, BATs, etc... but rather the "special" registers
 * like HID0, HID1, MSSCR0, etc...
 */
_GLOBAL(__save_cpu_setup)
	/* Some CR fields are volatile, we back it up all */
	mfcr	r7

	/* Get storage ptr */
	lis	r5,cpu_state_storage@h
	ori	r5,r5,cpu_state_storage@l

	/* Save HID0 (common to all CONFIG_6xx cpus) */
	mfspr	r3,SPRN_HID0
	stw	r3,CS_HID0(r5)

	/* Now deal with CPU type dependent registers */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x8000	/* 7450 */
	cmplwi	cr1,r3,0x000c	/* 7400 */
	cmplwi	cr2,r3,0x800c	/* 7410 */
	cmplwi	cr3,r3,0x8001	/* 7455 */
	cmplwi	cr4,r3,0x8002	/* 7457 */
	cmplwi	cr5,r3,0x8003	/* 7447A */
	cmplwi	cr6,r3,0x7000	/* 750FX */
	cmplwi	cr7,r3,0x8004	/* 7448 */
	/* cr1 is 7400 || 7410 */
	cror	4*cr1+eq,4*cr1+eq,4*cr2+eq
	/* cr0 is 74xx */
	cror	4*cr0+eq,4*cr0+eq,4*cr3+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr4+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr5+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr7+eq
	bne	1f
	/* Backup 74xx specific regs */
	mfspr	r4,SPRN_MSSCR0
	stw	r4,CS_MSSCR0(r5)
	mfspr	r4,SPRN_MSSSR0
	stw	r4,CS_MSSSR0(r5)
	beq	cr1,1f
	/* Backup 745x specific registers */
	mfspr	r4,SPRN_HID1
	stw	r4,CS_HID1(r5)
	mfspr	r4,SPRN_ICTRL
	stw	r4,CS_ICTRL(r5)
	mfspr	r4,SPRN_LDSTCR
	stw	r4,CS_LDSTCR(r5)
	mfspr	r4,SPRN_LDSTDB
	stw	r4,CS_LDSTDB(r5)
1:
	bne	cr6,1f
	/* Backup 750FX specific registers */
	mfspr	r4,SPRN_HID1
	stw	r4,CS_HID1(r5)
	/* If rev 2.x, backup HID2 */
	mfspr	r3,SPRN_PVR
	andi.	r3,r3,0xff00
	cmpwi	cr0,r3,0x0200
	bne	1f
	mfspr	r4,SPRN_HID2
	stw	r4,CS_HID2(r5)
1:
	mtcr	r7
	blr

/* Called with no MMU context (typically MSR:IR/DR off) to
 * restore CPU state as backed up by the previous
 * function. This does not include cache setting
 */
_GLOBAL(__restore_cpu_setup)
	/* Some CR fields are volatile, we back it up all */
	mfcr	r7

	/* Get storage ptr */
	lis	r5,(cpu_state_storage-KERNELBASE)@h
	ori	r5,r5,cpu_state_storage@l

	/* Restore HID0 */
	lwz	r3,CS_HID0(r5)
	sync
	isync
	mtspr	SPRN_HID0,r3
	sync
	isync

	/* Now deal with CPU type dependent registers */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x8000	/* 7450 */
	cmplwi	cr1,r3,0x000c	/* 7400 */
	cmplwi	cr2,r3,0x800c	/* 7410 */
	cmplwi	cr3,r3,0x8001	/* 7455 */
	cmplwi	cr4,r3,0x8002	/* 7457 */
	cmplwi	cr5,r3,0x8003	/* 7447A */
	cmplwi	cr6,r3,0x7000	/* 750FX */
	cmplwi	cr7,r3,0x8004	/* 7448 */
	/* cr1 is 7400 || 7410 */
	cror	4*cr1+eq,4*cr1+eq,4*cr2+eq
	/* cr0 is 74xx */
	cror	4*cr0+eq,4*cr0+eq,4*cr3+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr4+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr5+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr7+eq
	bne	2f
	/* Restore 74xx specific regs */
	lwz	r4,CS_MSSCR0(r5)
	sync
	mtspr	SPRN_MSSCR0,r4
	sync
	isync
	lwz	r4,CS_MSSSR0(r5)
	sync
	mtspr	SPRN_MSSSR0,r4
	sync
	isync
	bne	cr2,1f
	/* Clear 7410 L2CR2 */
	li	r4,0
	mtspr	SPRN_L2CR2,r4
1:	beq	cr1,2f
	/* Restore 745x specific registers */
	lwz	r4,CS_HID1(r5)
	sync
	mtspr	SPRN_HID1,r4
	isync
	sync
	lwz	r4,CS_ICTRL(r5)
	sync
	mtspr	SPRN_ICTRL,r4
	isync
	sync
	lwz	r4,CS_LDSTCR(r5)
	sync
	mtspr	SPRN_LDSTCR,r4
	isync
	sync
	lwz	r4,CS_LDSTDB(r5)
	sync
	mtspr	SPRN_LDSTDB,r4
	isync
	sync
2:	bne	cr6,1f
	/* Restore 750FX specific registers
	 * that is restore HID2 on rev 2.x and PLL config & switch
	 * to PLL 0 on all
	 */
	/* If rev 2.x, restore HID2 with low voltage bit cleared */
	mfspr	r3,SPRN_PVR
	andi.	r3,r3,0xff00
	cmpwi	cr0,r3,0x0200
	bne	4f
	lwz	r4,CS_HID2(r5)
	rlwinm	r4,r4,0,19,17
	mtspr	SPRN_HID2,r4
	sync
4:
	lwz	r4,CS_HID1(r5)
	rlwinm	r5,r4,0,16,14
	mtspr	SPRN_HID1,r5
	/* Wait for PLL to stabilize */
	mftbl	r5
3:	mftbl	r6
	sub	r6,r6,r5
	cmplwi	cr0,r6,10000
	ble	3b
	/* Setup final PLL */
	mtspr	SPRN_HID1,r4
1:
	mtcr	r7
	blr

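The __save_cpu_setup/__restore_cpu_setup pair above is meant to bracket machine sleep: the save side runs in normal kernel context, while the restore side is entered from low-level wakeup code before the MMU is re-enabled. A minimal C-level sketch of the intended pairing (not part of this commit; the wrapper name and call site are hypothetical, and a real wakeup path would reach the restore entry point from assembly):

```c
/* Illustrative sketch only -- not part of this commit. In practice the
 * restore side is reached from low-level wakeup code with the MMU off;
 * this hypothetical wrapper just shows the intended ordering. */
extern void __save_cpu_setup(void);	/* snapshot HID0/HID1/MSSCR0/... into cpu_state_storage */
extern void __restore_cpu_setup(void);	/* replay the saved special registers */

static void example_6xx_sleep(void)	/* hypothetical platform sleep hook */
{
	__save_cpu_setup();	/* fill cpu_state_storage while still running normally */

	/* ... power down; on wakeup the special registers hold reset values ... */

	__restore_cpu_setup();	/* bring HID0 and friends back before resuming work */
}
```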
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
new file mode 100644
index 000000000000..26bd8ea35a4e
--- /dev/null
+++ b/arch/powerpc/kernel/tau_6xx.c
@@ -0,0 +1,271 @@
/*
 * temp.c	Thermal management for cpu's with Thermal Assist Units
 *
 * Written by Troy Benjegerdes <hozer@drgw.net>
 *
 * TODO:
 * dynamic power management to limit peak CPU temp (using ICTC)
 * calibration???
 *
 * Silly, crazy ideas: use cpu load (from scheduler) and ICTC to extend battery
 * life in portables, and add a 'performance/watt' metric somewhere in /proc
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/8xx_immap.h>
#include <asm/machdep.h>

static struct tau_temp
{
	int interrupts;
	unsigned char low;
	unsigned char high;
	unsigned char grew;
} tau[NR_CPUS];

struct timer_list tau_timer;

#undef DEBUG

/* TODO: put these in a /proc interface, with some sanity checks, and maybe
 * dynamic adjustment to minimize # of interrupts */
/* configurable values for step size and how much to expand the window when
 * we get an interrupt. These are based on the limit that was out of range */
#define step_size	2	/* step size when temp goes out of range */
#define window_expand	1	/* expand the window by this much */
/* configurable values for shrinking the window */
#define shrink_timer	2*HZ	/* period between shrinking the window */
#define min_window	2	/* minimum window size, degrees C */

void set_thresholds(unsigned long cpu)
{
#ifdef CONFIG_TAU_INT
	/*
	 * setup THRM1,
	 * threshold, valid bit, enable interrupts, interrupt when below threshold
	 */
	mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID);

	/* setup THRM2,
	 * threshold, valid bit, enable interrupts, interrupt when above threshhold
	 */
	mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE);
#else
	/* same thing but don't enable interrupts */
	mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID);
	mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V);
#endif
}

void TAUupdate(int cpu)
{
	unsigned thrm;

#ifdef DEBUG
	printk("TAUupdate ");
#endif

	/* if both thresholds are crossed, the step_sizes cancel out
	 * and the window winds up getting expanded twice. */
	if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */
		if(thrm & THRM1_TIN){ /* crossed low threshold */
			if (tau[cpu].low >= step_size){
				tau[cpu].low -= step_size;
				tau[cpu].high -= (step_size - window_expand);
			}
			tau[cpu].grew = 1;
#ifdef DEBUG
			printk("low threshold crossed ");
#endif
		}
	}
	if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */
		if(thrm & THRM1_TIN){ /* crossed high threshold */
			if (tau[cpu].high <= 127-step_size){
				tau[cpu].low += (step_size - window_expand);
				tau[cpu].high += step_size;
			}
			tau[cpu].grew = 1;
#ifdef DEBUG
			printk("high threshold crossed ");
#endif
		}
	}

#ifdef DEBUG
	printk("grew = %d\n", tau[cpu].grew);
#endif

#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */
	set_thresholds(cpu);
#endif

}

#ifdef CONFIG_TAU_INT
/*
 * TAU interrupts - called when we have a thermal assist unit interrupt
 * with interrupts disabled
 */

void TAUException(struct pt_regs * regs)
{
	int cpu = smp_processor_id();

	irq_enter();
	tau[cpu].interrupts++;

	TAUupdate(cpu);

	irq_exit();
}
#endif /* CONFIG_TAU_INT */

static void tau_timeout(void * info)
{
	int cpu;
	unsigned long flags;
	int size;
	int shrink;

	/* disabling interrupts *should* be okay */
	local_irq_save(flags);
	cpu = smp_processor_id();

#ifndef CONFIG_TAU_INT
	TAUupdate(cpu);
#endif

	size = tau[cpu].high - tau[cpu].low;
	if (size > min_window && ! tau[cpu].grew) {
		/* do an exponential shrink of half the amount currently over size */
		shrink = (2 + size - min_window) / 4;
		if (shrink) {
			tau[cpu].low += shrink;
			tau[cpu].high -= shrink;
		} else { /* size must have been min_window + 1 */
			tau[cpu].low += 1;
#if 1 /* debug */
			if ((tau[cpu].high - tau[cpu].low) != min_window){
				printk(KERN_ERR "temp.c: line %d, logic error\n", __LINE__);
			}
#endif
		}
	}

	tau[cpu].grew = 0;

	set_thresholds(cpu);

	/*
	 * Do the enable every time, since otherwise a bunch of (relatively)
	 * complex sleep code needs to be added. One mtspr every time
	 * tau_timeout is called is probably not a big deal.
	 *
	 * Enable thermal sensor and set up sample interval timer
	 * need 20 us to do the compare.. until a nice 'cpu_speed' function
	 * call is implemented, just assume a 500 mhz clock. It doesn't really
	 * matter if we take too long for a compare since it's all interrupt
	 * driven anyway.
	 *
	 * use a extra long time.. (60 us @ 500 mhz)
	 */
	mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);

	local_irq_restore(flags);
}

static void tau_timeout_smp(unsigned long unused)
{

	/* schedule ourselves to be run again */
	mod_timer(&tau_timer, jiffies + shrink_timer) ;
	on_each_cpu(tau_timeout, NULL, 1, 0);
}

/*
 * setup the TAU
 *
 * Set things up to use THRM1 as a temperature lower bound, and THRM2 as an upper bound.
 * Start off at zero
 */

int tau_initialized = 0;

void __init TAU_init_smp(void * info)
{
	unsigned long cpu = smp_processor_id();

	/* set these to a reasonable value and let the timer shrink the
	 * window */
	tau[cpu].low = 5;
	tau[cpu].high = 120;

	set_thresholds(cpu);
}

int __init TAU_init(void)
{
	/* We assume in SMP that if one CPU has TAU support, they
	 * all have it --BenH
	 */
	if (!cpu_has_feature(CPU_FTR_TAU)) {
		printk("Thermal assist unit not available\n");
		tau_initialized = 0;
		return 1;
	}


	/* first, set up the window shrinking timer */
	init_timer(&tau_timer);
	tau_timer.function = tau_timeout_smp;
	tau_timer.expires = jiffies + shrink_timer;
	add_timer(&tau_timer);

	on_each_cpu(TAU_init_smp, NULL, 1, 0);

	printk("Thermal assist unit ");
#ifdef CONFIG_TAU_INT
	printk("using interrupts, ");
#else
	printk("using timers, ");
#endif
	printk("shrink_timer: %d jiffies\n", shrink_timer);
	tau_initialized = 1;

	return 0;
}

__initcall(TAU_init);

/*
 * return current temp
 */

u32 cpu_temp_both(unsigned long cpu)
{
	return ((tau[cpu].high << 16) | tau[cpu].low);
}

int cpu_temp(unsigned long cpu)
{
	return ((tau[cpu].high + tau[cpu].low) / 2);
}

int tau_interrupts(unsigned long cpu)
{
	return (tau[cpu].interrupts);
}
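The three accessors at the end of tau_6xx.c are the interface other code uses to read the measured temperature window and interrupt count. A minimal sketch of a consumer (not part of this commit; the reporting function and its seq_file plumbing are assumed for illustration):

```c
/* Illustrative sketch only -- not part of this commit. Shows how a
 * /proc/cpuinfo-style reporter might consume the accessors above; the
 * function name below is hypothetical. */
#include <linux/types.h>
#include <linux/seq_file.h>

extern int tau_initialized;
extern u32 cpu_temp_both(unsigned long cpu);
extern int cpu_temp(unsigned long cpu);
extern int tau_interrupts(unsigned long cpu);

static void example_show_tau(struct seq_file *m, unsigned long cpu)
{
	u32 window;

	if (!tau_initialized)
		return;

	/* cpu_temp_both() packs the current window as (high << 16) | low */
	window = cpu_temp_both(cpu);
	seq_printf(m, "temperature\t: %u-%u C (uncalibrated)\n",
		   window & 0xffff, window >> 16);
	seq_printf(m, "tau interrupts\t: %d\n", tau_interrupts(cpu));
}
```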