Diffstat (limited to 'arch/ppc/kernel')
-rw-r--r--  arch/ppc/kernel/Makefile             |   28
-rw-r--r--  arch/ppc/kernel/cpu_setup_6xx.S      |  474
-rw-r--r--  arch/ppc/kernel/entry.S              |   60
-rw-r--r--  arch/ppc/kernel/head.S               |  183
-rw-r--r--  arch/ppc/kernel/idle.c               |  112
-rw-r--r--  arch/ppc/kernel/idle_6xx.S           |  233
-rw-r--r--  arch/ppc/kernel/idle_power4.S        |   91
-rw-r--r--  arch/ppc/kernel/l2cr.S               |  471
-rw-r--r--  arch/ppc/kernel/module.c             |  320
-rw-r--r--  arch/ppc/kernel/pci.c                |  396
-rw-r--r--  arch/ppc/kernel/perfmon_fsl_booke.c  |  222
-rw-r--r--  arch/ppc/kernel/ppc_htab.c           |    8
-rw-r--r--  arch/ppc/kernel/ppc_ksyms.c          |   26
-rw-r--r--  arch/ppc/kernel/setup.c              |  256
-rw-r--r--  arch/ppc/kernel/smp.c                |    2
-rw-r--r--  arch/ppc/kernel/swsusp.S             |  349
-rw-r--r--  arch/ppc/kernel/temp.c               |  271
17 files changed, 22 insertions(+), 3480 deletions(-)
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index e399bbb969a4..466437f4bcbb 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -1,48 +1,24 @@
 #
 # Makefile for the linux kernel.
 #
-ifneq ($(CONFIG_PPC_MERGE),y)
-
 extra-$(CONFIG_PPC_STD_MMU)	:= head.o
 extra-$(CONFIG_40x)		:= head_4xx.o
 extra-$(CONFIG_44x)		:= head_44x.o
 extra-$(CONFIG_FSL_BOOKE)	:= head_fsl_booke.o
 extra-$(CONFIG_8xx)		:= head_8xx.o
-extra-$(CONFIG_6xx)		+= idle_6xx.o
 extra-y				+= vmlinux.lds
 
-obj-y				:= entry.o traps.o idle.o time.o misc.o \
+obj-y				:= entry.o traps.o time.o misc.o \
 					setup.o \
 					ppc_htab.o
-obj-$(CONFIG_6xx)		+= l2cr.o cpu_setup_6xx.o
-obj-$(CONFIG_SOFTWARE_SUSPEND)	+= swsusp.o
-obj-$(CONFIG_MODULES)		+= module.o ppc_ksyms.o
+obj-$(CONFIG_MODULES)		+= ppc_ksyms.o
 obj-$(CONFIG_NOT_COHERENT_CACHE)	+= dma-mapping.o
 obj-$(CONFIG_PCI)		+= pci.o
 obj-$(CONFIG_RAPIDIO)		+= rio.o
 obj-$(CONFIG_KGDB)		+= ppc-stub.o
 obj-$(CONFIG_SMP)		+= smp.o smp-tbsync.o
-obj-$(CONFIG_TAU)		+= temp.o
-ifndef CONFIG_E200
-obj-$(CONFIG_FSL_BOOKE)		+= perfmon_fsl_booke.o
-endif
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
 
 ifndef CONFIG_MATH_EMULATION
 obj-$(CONFIG_8xx)		+= softemu8xx.o
 endif
-
-# These are here while we do the architecture merge
-
-else
-obj-y				:= idle.o
-obj-$(CONFIG_6xx)		+= l2cr.o cpu_setup_6xx.o
-obj-$(CONFIG_SOFTWARE_SUSPEND)	+= swsusp.o
-obj-$(CONFIG_MODULES)		+= module.o
-obj-$(CONFIG_NOT_COHERENT_CACHE)	+= dma-mapping.o
-obj-$(CONFIG_KGDB)		+= ppc-stub.o
-obj-$(CONFIG_TAU)		+= temp.o
-ifndef CONFIG_E200
-obj-$(CONFIG_FSL_BOOKE)		+= perfmon_fsl_booke.o
-endif
-endif
diff --git a/arch/ppc/kernel/cpu_setup_6xx.S b/arch/ppc/kernel/cpu_setup_6xx.S
deleted file mode 100644
index 55ed7716636f..000000000000
--- a/arch/ppc/kernel/cpu_setup_6xx.S
+++ /dev/null
@@ -1,474 +0,0 @@
-/*
- * This file contains low level CPU setup functions.
- * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/config.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/cputable.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-#include <asm/cache.h>
-
-_GLOBAL(__setup_cpu_603)
-	b	setup_common_caches
-_GLOBAL(__setup_cpu_604)
-	mflr	r4
-	bl	setup_common_caches
-	bl	setup_604_hid0
-	mtlr	r4
-	blr
-_GLOBAL(__setup_cpu_750)
-	mflr	r4
-	bl	__init_fpu_registers
-	bl	setup_common_caches
-	bl	setup_750_7400_hid0
-	mtlr	r4
-	blr
-_GLOBAL(__setup_cpu_750cx)
-	mflr	r4
-	bl	__init_fpu_registers
-	bl	setup_common_caches
-	bl	setup_750_7400_hid0
-	bl	setup_750cx
-	mtlr	r4
-	blr
-_GLOBAL(__setup_cpu_750fx)
-	mflr	r4
-	bl	__init_fpu_registers
-	bl	setup_common_caches
-	bl	setup_750_7400_hid0
-	bl	setup_750fx
-	mtlr	r4
-	blr
-_GLOBAL(__setup_cpu_7400)
-	mflr	r4
-	bl	__init_fpu_registers
-	bl	setup_7400_workarounds
-	bl	setup_common_caches
-	bl	setup_750_7400_hid0
-	mtlr	r4
-	blr
-_GLOBAL(__setup_cpu_7410)
-	mflr	r4
-	bl	__init_fpu_registers
-	bl	setup_7410_workarounds
-	bl	setup_common_caches
-	bl	setup_750_7400_hid0
-	li	r3,0
-	mtspr	SPRN_L2CR2,r3
-	mtlr	r4
-	blr
-_GLOBAL(__setup_cpu_745x)
-	mflr	r4
-	bl	setup_common_caches
-	bl	setup_745x_specifics
-	mtlr	r4
-	blr
-
-/* Enable caches for 603's, 604, 750 & 7400 */
-setup_common_caches:
-	mfspr	r11,SPRN_HID0
-	andi.	r0,r11,HID0_DCE
-	ori	r11,r11,HID0_ICE|HID0_DCE
-	ori	r8,r11,HID0_ICFI
-	bne	1f			/* don't invalidate the D-cache */
-	ori	r8,r8,HID0_DCI		/* unless it wasn't enabled */
-1:	sync
-	mtspr	SPRN_HID0,r8		/* enable and invalidate caches */
-	sync
-	mtspr	SPRN_HID0,r11		/* enable caches */
-	sync
-	isync
-	blr
-
-/* 604, 604e, 604ev, ...
- * Enable superscalar execution & branch history table
- */
-setup_604_hid0:
-	mfspr	r11,SPRN_HID0
-	ori	r11,r11,HID0_SIED|HID0_BHTE
-	ori	r8,r11,HID0_BTCD
-	sync
-	mtspr	SPRN_HID0,r8	/* flush branch target address cache */
-	sync			/* on 604e/604r */
-	mtspr	SPRN_HID0,r11
-	sync
-	isync
-	blr
-
-/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
- * erratas we work around here.
- * Moto MPC710CE.pdf describes them, those are errata
- * #3, #4 and #5
- * Note that we assume the firmware didn't choose to
- * apply other workarounds (there are other ones documented
- * in the .pdf). It appear that Apple firmware only works
- * around #3 and with the same fix we use. We may want to
- * check if the CPU is using 60x bus mode in which case
- * the workaround for errata #4 is useless. Also, we may
- * want to explicitely clear HID0_NOPDST as this is not
- * needed once we have applied workaround #5 (though it's
- * not set by Apple's firmware at least).
- */
-setup_7400_workarounds:
-	mfpvr	r3
-	rlwinm	r3,r3,0,20,31
-	cmpwi	0,r3,0x0207
-	ble	1f
-	blr
-setup_7410_workarounds:
-	mfpvr	r3
-	rlwinm	r3,r3,0,20,31
-	cmpwi	0,r3,0x0100
-	bnelr
-1:
-	mfspr	r11,SPRN_MSSSR0
-	/* Errata #3: Set L1OPQ_SIZE to 0x10 */
-	rlwinm	r11,r11,0,9,6
-	oris	r11,r11,0x0100
-	/* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?) */
-	oris	r11,r11,0x0002
-	/* Errata #5: Set DRLT_SIZE to 0x01 */
-	rlwinm	r11,r11,0,5,2
-	oris	r11,r11,0x0800
-	sync
-	mtspr	SPRN_MSSSR0,r11
-	sync
-	isync
-	blr
-
-/* 740/750/7400/7410
- * Enable Store Gathering (SGE), Address Brodcast (ABE),
- * Branch History Table (BHTE), Branch Target ICache (BTIC)
- * Dynamic Power Management (DPM), Speculative (SPD)
- * Clear Instruction cache throttling (ICTC)
- */
-setup_750_7400_hid0:
-	mfspr	r11,SPRN_HID0
-	ori	r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
-	oris	r11,r11,HID0_DPM@h
-BEGIN_FTR_SECTION
-	xori	r11,r11,HID0_BTIC
-END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
-BEGIN_FTR_SECTION
-	xoris	r11,r11,HID0_DPM@h	/* disable dynamic power mgmt */
-END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
-	li	r3,HID0_SPD
-	andc	r11,r11,r3		/* clear SPD: enable speculative */
-	li	r3,0
-	mtspr	SPRN_ICTC,r3		/* Instruction Cache Throttling off */
-	isync
-	mtspr	SPRN_HID0,r11
-	sync
-	isync
-	blr
-
-/* 750cx specific
- * Looks like we have to disable NAP feature for some PLL settings...
- * (waiting for confirmation)
- */
-setup_750cx:
-	mfspr	r10, SPRN_HID1
-	rlwinm	r10,r10,4,28,31
-	cmpwi	cr0,r10,7
-	cmpwi	cr1,r10,9
-	cmpwi	cr2,r10,11
-	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
-	cror	4*cr0+eq,4*cr0+eq,4*cr2+eq
-	bnelr
-	lwz	r6,CPU_SPEC_FEATURES(r5)
-	li	r7,CPU_FTR_CAN_NAP
-	andc	r6,r6,r7
-	stw	r6,CPU_SPEC_FEATURES(r5)
-	blr
-
-/* 750fx specific
- */
-setup_750fx:
-	blr
-
-/* MPC 745x
- * Enable Store Gathering (SGE), Branch Folding (FOLD)
- * Branch History Table (BHTE), Branch Target ICache (BTIC)
- * Dynamic Power Management (DPM), Speculative (SPD)
- * Ensure our data cache instructions really operate.
- * Timebase has to be running or we wouldn't have made it here,
- * just ensure we don't disable it.
- * Clear Instruction cache throttling (ICTC)
- * Enable L2 HW prefetch
- */
-setup_745x_specifics:
-	/* We check for the presence of an L3 cache setup by
-	 * the firmware. If any, we disable NAP capability as
-	 * it's known to be bogus on rev 2.1 and earlier
-	 */
-	mfspr	r11,SPRN_L3CR
-	andis.	r11,r11,L3CR_L3E@h
-	beq	1f
-	lwz	r6,CPU_SPEC_FEATURES(r5)
-	andi.	r0,r6,CPU_FTR_L3_DISABLE_NAP
-	beq	1f
-	li	r7,CPU_FTR_CAN_NAP
-	andc	r6,r6,r7
-	stw	r6,CPU_SPEC_FEATURES(r5)
-1:
-	mfspr	r11,SPRN_HID0
-
-	/* All of the bits we have to set.....
-	 */
-	ori	r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE
-	ori	r11,r11,HID0_LRSTK | HID0_BTIC
-	oris	r11,r11,HID0_DPM@h
-BEGIN_FTR_SECTION
-	xori	r11,r11,HID0_BTIC
-END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
-BEGIN_FTR_SECTION
-	xoris	r11,r11,HID0_DPM@h	/* disable dynamic power mgmt */
-END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
-
-	/* All of the bits we have to clear....
-	 */
-	li	r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
-	andc	r11,r11,r3		/* clear SPD: enable speculative */
-	li	r3,0
-
-	mtspr	SPRN_ICTC,r3		/* Instruction Cache Throttling off */
-	isync
-	mtspr	SPRN_HID0,r11
-	sync
-	isync
-
-	/* Enable L2 HW prefetch, if L2 is enabled
-	 */
-	mfspr	r3,SPRN_L2CR
-	andis.	r3,r3,L2CR_L2E@h
-	beqlr
-	mfspr	r3,SPRN_MSSCR0
-	ori	r3,r3,3
-	sync
-	mtspr	SPRN_MSSCR0,r3
-	sync
-	isync
-	blr
-
-/*
- * Initialize the FPU registers. This is needed to work around an errata
- * in some 750 cpus where using a not yet initialized FPU register after
- * power on reset may hang the CPU
- */
-_GLOBAL(__init_fpu_registers)
-	mfmsr	r10
-	ori	r11,r10,MSR_FP
-	mtmsr	r11
-	isync
-	addis	r9,r3,empty_zero_page@ha
-	addi	r9,r9,empty_zero_page@l
-	REST_32FPRS(0,r9)
-	sync
-	mtmsr	r10
-	isync
-	blr
-
-
-/* Definitions for the table use to save CPU states */
-#define CS_HID0		0
-#define CS_HID1		4
-#define CS_HID2		8
-#define CS_MSSCR0	12
-#define CS_MSSSR0	16
-#define CS_ICTRL	20
-#define CS_LDSTCR	24
-#define CS_LDSTDB	28
-#define CS_SIZE		32
-
-	.data
-	.balign	L1_CACHE_BYTES
-cpu_state_storage:
-	.space	CS_SIZE
-	.balign	L1_CACHE_BYTES,0
-	.text
-
-/* Called in normal context to backup CPU 0 state. This
- * does not include cache settings. This function is also
- * called for machine sleep. This does not include the MMU
- * setup, BATs, etc... but rather the "special" registers
- * like HID0, HID1, MSSCR0, etc...
- */
-_GLOBAL(__save_cpu_setup)
-	/* Some CR fields are volatile, we back it up all */
-	mfcr	r7
-
-	/* Get storage ptr */
-	lis	r5,cpu_state_storage@h
-	ori	r5,r5,cpu_state_storage@l
-
-	/* Save HID0 (common to all CONFIG_6xx cpus) */
-	mfspr	r3,SPRN_HID0
-	stw	r3,CS_HID0(r5)
-
-	/* Now deal with CPU type dependent registers */
-	mfspr	r3,SPRN_PVR
-	srwi	r3,r3,16
-	cmplwi	cr0,r3,0x8000	/* 7450 */
-	cmplwi	cr1,r3,0x000c	/* 7400 */
-	cmplwi	cr2,r3,0x800c	/* 7410 */
-	cmplwi	cr3,r3,0x8001	/* 7455 */
-	cmplwi	cr4,r3,0x8002	/* 7457 */
-	cmplwi	cr5,r3,0x8003	/* 7447A */
-	cmplwi	cr6,r3,0x7000	/* 750FX */
-	cmplwi	cr7,r3,0x8004	/* 7448 */
-	/* cr1 is 7400 || 7410 */
-	cror	4*cr1+eq,4*cr1+eq,4*cr2+eq
-	/* cr0 is 74xx */
-	cror	4*cr0+eq,4*cr0+eq,4*cr3+eq
-	cror	4*cr0+eq,4*cr0+eq,4*cr4+eq
-	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
-	cror	4*cr0+eq,4*cr0+eq,4*cr5+eq
-	cror	4*cr0+eq,4*cr0+eq,4*cr7+eq
-	bne	1f
-	/* Backup 74xx specific regs */
-	mfspr	r4,SPRN_MSSCR0
-	stw	r4,CS_MSSCR0(r5)
-	mfspr	r4,SPRN_MSSSR0
-	stw	r4,CS_MSSSR0(r5)
-	beq	cr1,1f
-	/* Backup 745x specific registers */
-	mfspr	r4,SPRN_HID1
-	stw	r4,CS_HID1(r5)
-	mfspr	r4,SPRN_ICTRL
-	stw	r4,CS_ICTRL(r5)
-	mfspr	r4,SPRN_LDSTCR
-	stw	r4,CS_LDSTCR(r5)
-	mfspr	r4,SPRN_LDSTDB
-	stw	r4,CS_LDSTDB(r5)
-1:
-	bne	cr6,1f
-	/* Backup 750FX specific registers */
-	mfspr	r4,SPRN_HID1
-	stw	r4,CS_HID1(r5)
-	/* If rev 2.x, backup HID2 */
-	mfspr	r3,SPRN_PVR
-	andi.	r3,r3,0xff00
-	cmpwi	cr0,r3,0x0200
-	bne	1f
-	mfspr	r4,SPRN_HID2
-	stw	r4,CS_HID2(r5)
-1:
-	mtcr	r7
-	blr
-
-/* Called with no MMU context (typically MSR:IR/DR off) to
- * restore CPU state as backed up by the previous
- * function. This does not include cache setting
- */
-_GLOBAL(__restore_cpu_setup)
-	/* Some CR fields are volatile, we back it up all */
-	mfcr	r7
-
-	/* Get storage ptr */
-	lis	r5,(cpu_state_storage-KERNELBASE)@h
-	ori	r5,r5,cpu_state_storage@l
-
-	/* Restore HID0 */
-	lwz	r3,CS_HID0(r5)
-	sync
-	isync
-	mtspr	SPRN_HID0,r3
-	sync
-	isync
-
-	/* Now deal with CPU type dependent registers */
-	mfspr	r3,SPRN_PVR
-	srwi	r3,r3,16
-	cmplwi	cr0,r3,0x8000	/* 7450 */
-	cmplwi	cr1,r3,0x000c	/* 7400 */
-	cmplwi	cr2,r3,0x800c	/* 7410 */
-	cmplwi	cr3,r3,0x8001	/* 7455 */
-	cmplwi	cr4,r3,0x8002	/* 7457 */
-	cmplwi	cr5,r3,0x8003	/* 7447A */
-	cmplwi	cr6,r3,0x7000	/* 750FX */
-	cmplwi	cr7,r3,0x8004	/* 7448 */
-	/* cr1 is 7400 || 7410 */
-	cror	4*cr1+eq,4*cr1+eq,4*cr2+eq
-	/* cr0 is 74xx */
-	cror	4*cr0+eq,4*cr0+eq,4*cr3+eq
-	cror	4*cr0+eq,4*cr0+eq,4*cr4+eq
-	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
-	cror	4*cr0+eq,4*cr0+eq,4*cr5+eq
-	cror	4*cr0+eq,4*cr0+eq,4*cr7+eq
-	bne	2f
-	/* Restore 74xx specific regs */
-	lwz	r4,CS_MSSCR0(r5)
-	sync
-	mtspr	SPRN_MSSCR0,r4
-	sync
-	isync
-	lwz	r4,CS_MSSSR0(r5)
-	sync
-	mtspr	SPRN_MSSSR0,r4
-	sync
-	isync
-	bne	cr2,1f
-	/* Clear 7410 L2CR2 */
-	li	r4,0
-	mtspr	SPRN_L2CR2,r4
-1:	beq	cr1,2f
-	/* Restore 745x specific registers */
-	lwz	r4,CS_HID1(r5)
-	sync
-	mtspr	SPRN_HID1,r4
-	isync
-	sync
-	lwz	r4,CS_ICTRL(r5)
-	sync
-	mtspr	SPRN_ICTRL,r4
-	isync
-	sync
-	lwz	r4,CS_LDSTCR(r5)
-	sync
-	mtspr	SPRN_LDSTCR,r4
-	isync
-	sync
-	lwz	r4,CS_LDSTDB(r5)
-	sync
-	mtspr	SPRN_LDSTDB,r4
-	isync
-	sync
-2:	bne	cr6,1f
-	/* Restore 750FX specific registers
-	 * that is restore HID2 on rev 2.x and PLL config & switch
-	 * to PLL 0 on all
-	 */
-	/* If rev 2.x, restore HID2 with low voltage bit cleared */
-	mfspr	r3,SPRN_PVR
-	andi.	r3,r3,0xff00
-	cmpwi	cr0,r3,0x0200
-	bne	4f
-	lwz	r4,CS_HID2(r5)
-	rlwinm	r4,r4,0,19,17
-	mtspr	SPRN_HID2,r4
-	sync
-4:
-	lwz	r4,CS_HID1(r5)
-	rlwinm	r5,r4,0,16,14
-	mtspr	SPRN_HID1,r5
-	/* Wait for PLL to stabilize */
-	mftbl	r5
-3:	mftbl	r6
-	sub	r6,r6,r5
-	cmplwi	cr0,r6,10000
-	ble	3b
-	/* Setup final PLL */
-	mtspr	SPRN_HID1,r4
-1:
-	mtcr	r7
-	blr
-
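(Aside — not part of the patch: the __save_cpu_setup/__restore_cpu_setup routines deleted above decide everything from the upper halfword of the Processor Version Register, comparing it against known model IDs (0x8000 = 7450, 0x000c = 7400, and so on) and folding the compare results together with cror. For readers who don't write PowerPC assembly, here is a minimal, hypothetical C analogue of that PVR-dispatch idea; read_pvr(), the table entries and the setup stubs are invented for illustration, while the real code uses the mfpvr instruction and condition-register logic.)

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the mfpvr instruction. */
    static uint32_t read_pvr(void)
    {
            return 0x80000201;      /* pretend we are a 7450, rev 2.1 */
    }

    struct cpu_setup {
            uint16_t pvr_hi;        /* upper 16 bits of the PVR */
            const char *name;
            void (*setup)(void);    /* model-specific setup routine */
    };

    static void setup_745x(void) { puts("applying 745x-specific setup"); }
    static void setup_7400(void) { puts("applying 7400-specific setup"); }

    static const struct cpu_setup table[] = {
            { 0x8000, "7450", setup_745x },
            { 0x000c, "7400", setup_7400 },
    };

    int main(void)
    {
            uint16_t hi = read_pvr() >> 16;
            size_t i;

            /* Linear scan of the model table, like the cmplwi chain above. */
            for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                    if (table[i].pvr_hi == hi) {
                            printf("detected %s\n", table[i].name);
                            table[i].setup();
                    }
            return 0;
    }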
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 3a2815978488..5891ecbdc703 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -135,10 +135,10 @@ transfer_to_handler:
 	mfspr	r11,SPRN_HID0
 	mtcr	r11
 BEGIN_FTR_SECTION
-	bt-	8,power_save_6xx_restore	/* Check DOZE */
+	bt-	8,4f			/* Check DOZE */
 END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
 BEGIN_FTR_SECTION
-	bt-	9,power_save_6xx_restore	/* Check NAP */
+	bt-	9,4f			/* Check NAP */
 END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
 #endif /* CONFIG_6xx */
 	.globl transfer_to_handler_cont
@@ -157,6 +157,10 @@ transfer_to_handler_cont:
 	SYNC
 	RFI				/* jump to handler, enable MMU */
 
+#ifdef CONFIG_6xx
+4:	b	power_save_6xx_restore
+#endif
+
 /*
  * On kernel stack overflow, load up an initial stack pointer
  * and call StackOverflow(regs), which should not return.
@@ -926,55 +930,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
 	b	4b
 
 	.comm	ee_restarts,4
-
-/*
- * PROM code for specific machines follows. Put it
- * here so it's easy to add arch-specific sections later.
- * -- Cort
- */
-#ifdef CONFIG_PPC_OF
-/*
- * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
- * called with the MMU off.
- */
-_GLOBAL(enter_rtas)
-	stwu	r1,-INT_FRAME_SIZE(r1)
-	mflr	r0
-	stw	r0,INT_FRAME_SIZE+4(r1)
-	lis	r4,rtas_data@ha
-	lwz	r4,rtas_data@l(r4)
-	lis	r6,1f@ha	/* physical return address for rtas */
-	addi	r6,r6,1f@l
-	tophys(r6,r6)
-	tophys(r7,r1)
-	lis	r8,rtas_entry@ha
-	lwz	r8,rtas_entry@l(r8)
-	mfmsr	r9
-	stw	r9,8(r1)
-	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
-	SYNC			/* disable interrupts so SRR0/1 */
-	MTMSRD(r0)		/* don't get trashed */
-	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
-	mtlr	r6
-	CLR_TOP32(r7)
-	mtspr	SPRN_SPRG2,r7
-	mtspr	SPRN_SRR0,r8
-	mtspr	SPRN_SRR1,r9
-	RFI
-1:	tophys(r9,r1)
-	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
-	lwz	r9,8(r9)	/* original msr value */
-	FIX_SRR1(r9,r0)
-	addi	r1,r1,INT_FRAME_SIZE
-	li	r0,0
-	mtspr	SPRN_SPRG2,r0
-	mtspr	SPRN_SRR0,r8
-	mtspr	SPRN_SRR1,r9
-	RFI			/* return to caller */
-
-	.globl	machine_check_in_rtas
-machine_check_in_rtas:
-	twi	31,0,0
-	/* XXX load up BATs and panic */
-
-#endif /* CONFIG_PPC_OF */
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index 53ea845fb911..01303efeddad 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -37,19 +37,6 @@
 #include <asm/amigappc.h>
 #endif
 
-#ifdef CONFIG_PPC64BRIDGE
-#define LOAD_BAT(n, reg, RA, RB)	\
-	ld	RA,(n*32)+0(reg);	\
-	ld	RB,(n*32)+8(reg);	\
-	mtspr	SPRN_IBAT##n##U,RA;	\
-	mtspr	SPRN_IBAT##n##L,RB;	\
-	ld	RA,(n*32)+16(reg);	\
-	ld	RB,(n*32)+24(reg);	\
-	mtspr	SPRN_DBAT##n##U,RA;	\
-	mtspr	SPRN_DBAT##n##L,RB;	\
-
-#else /* CONFIG_PPC64BRIDGE */
-
 /* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
 #define LOAD_BAT(n, reg, RA, RB)	\
 	/* see the comment for clear_bats() -- Cort */ \
@@ -66,7 +53,6 @@
 	mtspr	SPRN_DBAT##n##U,RA;	\
 	mtspr	SPRN_DBAT##n##L,RB;	\
 1:
-#endif /* CONFIG_PPC64BRIDGE */
 
 	.text
 	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
@@ -129,11 +115,6 @@ _start:
 
 	.globl	__start
 __start:
-/*
- * We have to do any OF calls before we map ourselves to KERNELBASE,
- * because OF may have I/O devices mapped into that area
- * (particularly on CHRP).
- */
 	mr	r31,r3			/* save parameters */
 	mr	r30,r4
 	mr	r29,r5
@@ -148,14 +129,6 @@ __start:
  */
 	bl	early_init
 
-/*
- * On POWER4, we first need to tweak some CPU configuration registers
- * like real mode cache inhibit or exception base
- */
-#ifdef CONFIG_POWER4
-	bl	__970_cpu_preinit
-#endif /* CONFIG_POWER4 */
-
 #ifdef CONFIG_APUS
 /* On APUS the __va/__pa constants need to be set to the correct
  * values before continuing.
@@ -169,7 +142,6 @@ __start:
  */
 	bl	mmu_off
 __after_mmu_off:
-#ifndef CONFIG_POWER4
 	bl	clear_bats
 	bl	flush_tlbs
 
@@ -177,10 +149,6 @@ __after_mmu_off:
 #if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
 	bl	setup_disp_bat
 #endif
-#else /* CONFIG_POWER4 */
-	bl	reloc_offset
-	bl	initial_mm_power4
-#endif /* CONFIG_POWER4 */
 
 /*
  * Call setup_cpu for CPU 0 and initialize 6xx Idle
@@ -192,18 +160,11 @@ __after_mmu_off:
 	bl	reloc_offset
 	bl	init_idle_6xx
 #endif /* CONFIG_6xx */
-#ifdef CONFIG_POWER4
-	bl	reloc_offset
-	bl	init_idle_power4
-#endif /* CONFIG_POWER4 */
 
 
 #ifndef CONFIG_APUS
 /*
  * We need to run with _start at physical address 0.
- * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
- * the exception vectors at 0 (and therefore this copy
- * overwrites OF's exception vectors with our own).
  * If the MMU is already turned on, we copy stuff to KERNELBASE,
  * otherwise we copy it to 0.
  */
@@ -358,51 +319,19 @@ i##n: \
 #endif
 
 /* Machine check */
-/*
- * On CHRP, this is complicated by the fact that we could get a
- * machine check inside RTAS, and we have no guarantee that certain
- * critical registers will have the values we expect. The set of
- * registers that might have bad values includes all the GPRs
- * and all the BATs. We indicate that we are in RTAS by putting
- * a non-zero value, the address of the exception frame to use,
- * in SPRG2. The machine check handler checks SPRG2 and uses its
- * value if it is non-zero. If we ever needed to free up SPRG2,
- * we could use a field in the thread_info or thread_struct instead.
- * (Other exception handlers assume that r1 is a valid kernel stack
- * pointer when we take an exception from supervisor mode.)
- * -- paulus.
- */
 	. = 0x200
 	mtspr	SPRN_SPRG0,r10
 	mtspr	SPRN_SPRG1,r11
 	mfcr	r10
-#ifdef CONFIG_PPC_CHRP
-	mfspr	r11,SPRN_SPRG2
-	cmpwi	0,r11,0
-	bne	7f
-#endif /* CONFIG_PPC_CHRP */
 	EXCEPTION_PROLOG_1
 7:	EXCEPTION_PROLOG_2
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_PPC_CHRP
-	mfspr	r4,SPRN_SPRG2
-	cmpwi	cr1,r4,0
-	bne	cr1,1f
-#endif
 	EXC_XFER_STD(0x200, machine_check_exception)
-#ifdef CONFIG_PPC_CHRP
-1:	b	machine_check_in_rtas
-#endif
 
 /* Data access exception. */
 	. = 0x300
-#ifdef CONFIG_PPC64BRIDGE
-	b	DataAccess
-DataAccessCont:
-#else
 DataAccess:
 	EXCEPTION_PROLOG
-#endif /* CONFIG_PPC64BRIDGE */
 	mfspr	r10,SPRN_DSISR
 	andis.	r0,r10,0xa470		/* weird error? */
 	bne	1f			/* if not, try to put a PTE */
@@ -414,21 +343,10 @@ DataAccess:
 	mfspr	r4,SPRN_DAR
 	EXC_XFER_EE_LITE(0x300, handle_page_fault)
 
-#ifdef CONFIG_PPC64BRIDGE
-/* SLB fault on data access. */
-	. = 0x380
-	b	DataSegment
-#endif /* CONFIG_PPC64BRIDGE */
-
 /* Instruction access exception. */
 	. = 0x400
-#ifdef CONFIG_PPC64BRIDGE
-	b	InstructionAccess
-InstructionAccessCont:
-#else
 InstructionAccess:
 	EXCEPTION_PROLOG
-#endif /* CONFIG_PPC64BRIDGE */
 	andis.	r0,r9,0x4000		/* no pte found? */
 	beq	1f			/* if so, try to put a PTE */
 	li	r3,0			/* into the hash table */
@@ -438,12 +356,6 @@ InstructionAccess:
 	mr	r5,r9
 	EXC_XFER_EE_LITE(0x400, handle_page_fault)
 
-#ifdef CONFIG_PPC64BRIDGE
-/* SLB fault on instruction access. */
-	. = 0x480
-	b	InstructionSegment
-#endif /* CONFIG_PPC64BRIDGE */
-
 /* External interrupt */
 	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
 
@@ -708,15 +620,9 @@ DataStoreTLBMiss:
 	EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
 	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
 	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
-#ifdef CONFIG_POWER4
-	EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
-	EXCEPTION(0x1700, Trap_17, altivec_assist_exception, EXC_XFER_EE)
-	EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD)
-#else /* !CONFIG_POWER4 */
 	EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
 	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
 	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
-#endif /* CONFIG_POWER4 */
 	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
 	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
 	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
@@ -754,28 +660,6 @@ AltiVecUnavailable:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)
 
-#ifdef CONFIG_PPC64BRIDGE
-DataAccess:
-	EXCEPTION_PROLOG
-	b	DataAccessCont
-
-InstructionAccess:
-	EXCEPTION_PROLOG
-	b	InstructionAccessCont
-
-DataSegment:
-	EXCEPTION_PROLOG
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	mfspr	r4,SPRN_DAR
-	stw	r4,_DAR(r11)
-	EXC_XFER_STD(0x380, unknown_exception)
-
-InstructionSegment:
-	EXCEPTION_PROLOG
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	EXC_XFER_STD(0x480, unknown_exception)
-#endif /* CONFIG_PPC64BRIDGE */
-
 #ifdef CONFIG_ALTIVEC
 /* Note that the AltiVec support is closely modeled after the FP
  * support. Changes to one are likely to be applicable to the
@@ -1048,13 +932,6 @@ __secondary_start_pmac_0:
 
 	.globl	__secondary_start
 __secondary_start:
-#ifdef CONFIG_PPC64BRIDGE
-	mfmsr	r0
-	clrldi	r0,r0,1		/* make sure it's in 32-bit mode */
-	SYNC
-	MTMSRD(r0)
-	isync
-#endif
 	/* Copy some CPU settings from CPU 0 */
 	bl	__restore_cpu_setup
 
@@ -1065,10 +942,6 @@ __secondary_start:
 	lis	r3,-KERNELBASE@h
 	bl	init_idle_6xx
 #endif /* CONFIG_6xx */
-#ifdef CONFIG_POWER4
-	lis	r3,-KERNELBASE@h
-	bl	init_idle_power4
-#endif /* CONFIG_POWER4 */
 
 	/* get current_thread_info and current */
 	lis	r1,secondary_ti@ha
@@ -1109,12 +982,12 @@ __secondary_start:
  * Those generic dummy functions are kept for CPUs not
  * included in CONFIG_6xx
  */
-#if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4)
+#if !defined(CONFIG_6xx)
 _GLOBAL(__save_cpu_setup)
 	blr
 _GLOBAL(__restore_cpu_setup)
 	blr
-#endif /* !defined(CONFIG_6xx) && !defined(CONFIG_POWER4) */
+#endif /* !defined(CONFIG_6xx) */
 
 
 /*
@@ -1132,11 +1005,6 @@ load_up_mmu:
 	tophys(r6,r6)
 	lwz	r6,_SDR1@l(r6)
 	mtspr	SPRN_SDR1,r6
-#ifdef CONFIG_PPC64BRIDGE
-	/* clear the ASR so we only use the pseudo-segment registers. */
-	li	r6,0
-	mtasr	r6
-#endif /* CONFIG_PPC64BRIDGE */
 	li	r0,16		/* load up segment register values */
 	mtctr	r0		/* for context 0 */
 	lis	r3,0x2000	/* Ku = 1, VSID = 0 */
@@ -1145,7 +1013,7 @@ load_up_mmu:
 	addi	r3,r3,0x111	/* increment VSID */
 	addis	r4,r4,0x1000	/* address of next segment */
 	bdnz	3b
-#ifndef CONFIG_POWER4
+
 /* Load the BAT registers with the values set up by MMU_init.
    MMU_init takes care of whether we're on a 601 or not. */
 	mfpvr	r3
@@ -1158,7 +1026,7 @@ load_up_mmu:
 	LOAD_BAT(1,r3,r4,r5)
 	LOAD_BAT(2,r3,r4,r5)
 	LOAD_BAT(3,r3,r4,r5)
-#endif /* CONFIG_POWER4 */
+
 	blr
 
 /*
@@ -1269,9 +1137,6 @@ _GLOBAL(set_context)
 	li	r4,0
 	isync
 3:
-#ifdef CONFIG_PPC64BRIDGE
-	slbie	r4
-#endif /* CONFIG_PPC64BRIDGE */
 	mtsrin	r3,r4
 	addi	r3,r3,0x111	/* next VSID */
 	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
@@ -1358,7 +1223,6 @@ mmu_off:
 	sync
 	RFI
 
-#ifndef CONFIG_POWER4
 /*
  * Use the first pair of BAT registers to map the 1st 16MB
  * of RAM to KERNELBASE. From this point on we can't safely
@@ -1366,7 +1230,6 @@ mmu_off:
  */
 initial_bats:
 	lis	r11,KERNELBASE@h
-#ifndef CONFIG_PPC64BRIDGE
 	mfspr	r9,SPRN_PVR
 	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
 	cmpwi	0,r9,1
@@ -1381,7 +1244,6 @@ initial_bats:
 	mtspr	SPRN_IBAT1L,r10
 	isync
 	blr
-#endif /* CONFIG_PPC64BRIDGE */
 
 4:	tophys(r8,r11)
 #ifdef CONFIG_SMP
@@ -1395,11 +1257,6 @@ initial_bats:
 	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */
 #endif /* CONFIG_APUS */
 
-#ifdef CONFIG_PPC64BRIDGE
-	/* clear out the high 32 bits in the BAT */
-	clrldi	r11,r11,32
-	clrldi	r8,r8,32
-#endif /* CONFIG_PPC64BRIDGE */
 	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx (not 601) have valid */
 	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
 	mtspr	SPRN_IBAT0L,r8
@@ -1432,38 +1289,6 @@ setup_disp_bat:
 
 #endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */
 
-#else /* CONFIG_POWER4 */
-/*
- * Load up the SDR1 and segment register values now
- * since we don't have the BATs.
- * Also make sure we are running in 32-bit mode.
- */
-
-initial_mm_power4:
-	addis	r14,r3,_SDR1@ha		/* get the value from _SDR1 */
-	lwz	r14,_SDR1@l(r14)	/* assume hash table below 4GB */
-	mtspr	SPRN_SDR1,r14
-	slbia
-	lis	r4,0x2000		/* set pseudo-segment reg 12 */
-	ori	r5,r4,0x0ccc
-	mtsr	12,r5
-#if 0
-	ori	r5,r4,0x0888		/* set pseudo-segment reg 8 */
-	mtsr	8,r5			/* (for access to serial port) */
-#endif
-#ifdef CONFIG_BOOTX_TEXT
-	ori	r5,r4,0x0999		/* set pseudo-segment reg 9 */
-	mtsr	9,r5			/* (for access to screen) */
-#endif
-	mfmsr	r0
-	clrldi	r0,r0,1
-	sync
-	mtmsr	r0
-	isync
-	blr
-
-#endif /* CONFIG_POWER4 */
-
 #ifdef CONFIG_8260
 /* Jump into the system reset for the rom.
  * We first disable the MMU, and then jump to the ROM reset address.
diff --git a/arch/ppc/kernel/idle.c b/arch/ppc/kernel/idle.c
deleted file mode 100644
index 1be3ca5bae40..000000000000
--- a/arch/ppc/kernel/idle.c
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Idle daemon for PowerPC. Idle daemon will handle any action
- * that needs to be taken when the system becomes idle.
- *
- * Written by Cort Dougan (cort@cs.nmt.edu). Subsequently hacked
- * on by Tom Rini, Armin Kuster, Paul Mackerras and others.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/config.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/sysctl.h>
-#include <linux/cpu.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/mmu.h>
-#include <asm/cache.h>
-#include <asm/cputable.h>
-#include <asm/machdep.h>
-#include <asm/smp.h>
-
-void default_idle(void)
-{
-	void (*powersave)(void);
-
-	powersave = ppc_md.power_save;
-
-	if (!need_resched()) {
-		if (powersave != NULL)
-			powersave();
-#ifdef CONFIG_SMP
-		else {
-			set_thread_flag(TIF_POLLING_NRFLAG);
-			while (!need_resched() &&
-			       !cpu_is_offline(smp_processor_id()))
-				barrier();
-			clear_thread_flag(TIF_POLLING_NRFLAG);
-		}
-#endif
-	}
-}
-
-/*
- * The body of the idle task.
- */
-void cpu_idle(void)
-{
-	int cpu = smp_processor_id();
-
-	for (;;) {
-		while (!need_resched()) {
-			if (ppc_md.idle != NULL)
-				ppc_md.idle();
-			else
-				default_idle();
-		}
-
-		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
-			cpu_die();
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
-	}
-}
-
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_6xx)
-/*
- * Register the sysctl to set/clear powersave_nap.
- */
-extern int powersave_nap;
-
-static ctl_table powersave_nap_ctl_table[]={
-	{
-		.ctl_name	= KERN_PPC_POWERSAVE_NAP,
-		.procname	= "powersave-nap",
-		.data		= &powersave_nap,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
-	},
-	{ 0, },
-};
-static ctl_table powersave_nap_sysctl_root[] = {
-	{ 1, "kernel", NULL, 0, 0755, powersave_nap_ctl_table, },
-	{ 0,},
-};
-
-static int __init
-register_powersave_nap_sysctl(void)
-{
-	register_sysctl_table(powersave_nap_sysctl_root, 0);
-
-	return 0;
-}
-
-__initcall(register_powersave_nap_sysctl);
-#endif
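(Aside — not part of the patch: the default_idle() removed above is a function-pointer dispatch. The platform may install a power_save hook, and only when none exists does the CPU fall back to busy-polling for need_resched(). Here is a compilable sketch of that pattern; board_power_save and need_resched_flag are invented stand-ins for ppc_md.power_save and the TIF_NEED_RESCHED test.)

    #include <stdio.h>

    static volatile int need_resched_flag;

    static void doze(void) { puts("entering low-power doze"); }

    /* Installed at "boot"; may be NULL when no platform hook exists. */
    static void (*board_power_save)(void) = doze;

    static void default_idle(void)
    {
            if (!need_resched_flag) {
                    if (board_power_save)
                            board_power_save();     /* platform-specific sleep */
                    else
                            while (!need_resched_flag)
                                    ;               /* busy-poll fallback */
            }
    }

    int main(void)
    {
            default_idle();
            return 0;
    }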
diff --git a/arch/ppc/kernel/idle_6xx.S b/arch/ppc/kernel/idle_6xx.S
deleted file mode 100644
index 1a2194cf6828..000000000000
--- a/arch/ppc/kernel/idle_6xx.S
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * This file contains the power_save function for 6xx & 7xxx CPUs
- * rewritten in assembler
- *
- * Warning ! This code assumes that if your machine has a 750fx
- * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
- * if this is not the case some additional changes will have to
- * be done to check a runtime var (a bit like powersave-nap)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/threads.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/cputable.h>
-#include <asm/thread_info.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-
-#undef DEBUG
-
-	.text
-
-/*
- * Init idle, called at early CPU setup time from head.S for each CPU
- * Make sure no rest of NAP mode remains in HID0, save default
- * values for some CPU specific registers. Called with r24
- * containing CPU number and r3 reloc offset
- */
-_GLOBAL(init_idle_6xx)
-BEGIN_FTR_SECTION
-	mfspr	r4,SPRN_HID0
-	rlwinm	r4,r4,0,10,8	/* Clear NAP */
-	mtspr	SPRN_HID0, r4
-	b	1f
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
-	blr
-1:
-	slwi	r5,r24,2
-	add	r5,r5,r3
-BEGIN_FTR_SECTION
-	mfspr	r4,SPRN_MSSCR0
-	addis	r6,r5, nap_save_msscr0@ha
-	stw	r4,nap_save_msscr0@l(r6)
-END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
-BEGIN_FTR_SECTION
-	mfspr	r4,SPRN_HID1
-	addis	r6,r5,nap_save_hid1@ha
-	stw	r4,nap_save_hid1@l(r6)
-END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
-	blr
-
-/*
- * Here is the power_save_6xx function. This could eventually be
- * split into several functions & changing the function pointer
- * depending on the various features.
- */
-_GLOBAL(ppc6xx_idle)
-	/* Check if we can nap or doze, put HID0 mask in r3
-	 */
-	lis	r3, 0
-BEGIN_FTR_SECTION
-	lis	r3,HID0_DOZE@h
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-BEGIN_FTR_SECTION
-	/* We must dynamically check for the NAP feature as it
-	 * can be cleared by CPU init after the fixups are done
-	 */
-	lis	r4,cur_cpu_spec@ha
-	lwz	r4,cur_cpu_spec@l(r4)
-	lwz	r4,CPU_SPEC_FEATURES(r4)
-	andi.	r0,r4,CPU_FTR_CAN_NAP
-	beq	1f
-	/* Now check if user or arch enabled NAP mode */
-	lis	r4,powersave_nap@ha
-	lwz	r4,powersave_nap@l(r4)
-	cmpwi	0,r4,0
-	beq	1f
-	lis	r3,HID0_NAP@h
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
-	cmpwi	0,r3,0
-	beqlr
-
-	/* Clear MSR:EE */
-	mfmsr	r7
-	rlwinm	r0,r7,0,17,15
-	mtmsr	r0
-
-	/* Check current_thread_info()->flags */
-	rlwinm	r4,r1,0,0,18
-	lwz	r4,TI_FLAGS(r4)
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	beq	1f
-	mtmsr	r7	/* out of line this ? */
-	blr
-1:
-	/* Some pre-nap cleanups needed on some CPUs */
-	andis.	r0,r3,HID0_NAP@h
-	beq	2f
-BEGIN_FTR_SECTION
-	/* Disable L2 prefetch on some 745x and try to ensure
-	 * L2 prefetch engines are idle. As explained by errata
-	 * text, we can't be sure they are, we just hope very hard
-	 * that well be enough (sic !). At least I noticed Apple
-	 * doesn't even bother doing the dcbf's here...
-	 */
-	mfspr	r4,SPRN_MSSCR0
-	rlwinm	r4,r4,0,0,29
-	sync
-	mtspr	SPRN_MSSCR0,r4
-	sync
-	isync
-	lis	r4,KERNELBASE@h
-	dcbf	0,r4
-	dcbf	0,r4
-	dcbf	0,r4
-	dcbf	0,r4
-END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
-#ifdef DEBUG
-	lis	r6,nap_enter_count@ha
-	lwz	r4,nap_enter_count@l(r6)
-	addi	r4,r4,1
-	stw	r4,nap_enter_count@l(r6)
-#endif
-2:
-BEGIN_FTR_SECTION
-	/* Go to low speed mode on some 750FX */
-	lis	r4,powersave_lowspeed@ha
-	lwz	r4,powersave_lowspeed@l(r4)
-	cmpwi	0,r4,0
-	beq	1f
-	mfspr	r4,SPRN_HID1
-	oris	r4,r4,0x0001
-	mtspr	SPRN_HID1,r4
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
-
-	/* Go to NAP or DOZE now */
-	mfspr	r4,SPRN_HID0
-	lis	r5,(HID0_NAP|HID0_SLEEP)@h
-BEGIN_FTR_SECTION
-	oris	r5,r5,HID0_DOZE@h
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-	andc	r4,r4,r5
-	or	r4,r4,r3
-BEGIN_FTR_SECTION
-	oris	r4,r4,HID0_DPM@h	/* that should be done once for all */
-END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
-	mtspr	SPRN_HID0,r4
-BEGIN_FTR_SECTION
-	DSSALL
-	sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-	ori	r7,r7,MSR_EE	/* Could be ommited (already set) */
-	oris	r7,r7,MSR_POW@h
-	sync
-	isync
-	mtmsr	r7
-	isync
-	sync
-	blr
-
-/*
- * Return from NAP/DOZE mode, restore some CPU specific registers,
- * we are called with DR/IR still off and r2 containing physical
- * address of current.
- */
-_GLOBAL(power_save_6xx_restore)
-	mfspr	r11,SPRN_HID0
-	rlwinm.	r11,r11,0,10,8	/* Clear NAP & copy NAP bit !state to cr1 EQ */
-	cror	4*cr1+eq,4*cr0+eq,4*cr0+eq
-BEGIN_FTR_SECTION
-	rlwinm	r11,r11,0,9,7	/* Clear DOZE */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-	mtspr	SPRN_HID0, r11
-
-#ifdef DEBUG
-	beq	cr1,1f
-	lis	r11,(nap_return_count-KERNELBASE)@ha
-	lwz	r9,nap_return_count@l(r11)
-	addi	r9,r9,1
-	stw	r9,nap_return_count@l(r11)
-1:
-#endif
-
-	rlwinm	r9,r1,0,0,18
-	tophys(r9,r9)
-	lwz	r11,TI_CPU(r9)
-	slwi	r11,r11,2
-	/* Todo make sure all these are in the same page
-	 * and load r22 (@ha part + CPU offset) only once
-	 */
-BEGIN_FTR_SECTION
-	beq	cr1,1f
-	addis	r9,r11,(nap_save_msscr0-KERNELBASE)@ha
-	lwz	r9,nap_save_msscr0@l(r9)
-	mtspr	SPRN_MSSCR0, r9
-	sync
-	isync
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
-BEGIN_FTR_SECTION
-	addis	r9,r11,(nap_save_hid1-KERNELBASE)@ha
-	lwz	r9,nap_save_hid1@l(r9)
-	mtspr	SPRN_HID1, r9
-END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
-	b	transfer_to_handler_cont
-
-	.data
-
-_GLOBAL(nap_save_msscr0)
-	.space	4*NR_CPUS
-
-_GLOBAL(nap_save_hid1)
-	.space	4*NR_CPUS
-
-_GLOBAL(powersave_nap)
-	.long	0
-_GLOBAL(powersave_lowspeed)
-	.long	0
-
-#ifdef DEBUG
-_GLOBAL(nap_enter_count)
-	.space	4
-_GLOBAL(nap_return_count)
-	.space	4
-#endif
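(Aside — not part of the patch: note the two-level gate in ppc6xx_idle above. NAP is entered only if the CPU_FTR_CAN_NAP feature bit survives runtime CPU init *and* the user left the powersave-nap sysctl enabled. A tiny illustrative C equivalent follows; the names are invented for illustration and are not kernel APIs.)

    #include <stdio.h>

    #define FTR_CAN_NAP 0x1

    static unsigned int cpu_features = FTR_CAN_NAP;  /* set at boot */
    static int powersave_nap = 1;                    /* sysctl-controlled */

    /* Both the hardware capability and the user toggle must agree. */
    static int nap_allowed(void)
    {
            return (cpu_features & FTR_CAN_NAP) && powersave_nap;
    }

    int main(void)
    {
            printf("nap %s\n", nap_allowed() ? "allowed" : "blocked");
            return 0;
    }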
diff --git a/arch/ppc/kernel/idle_power4.S b/arch/ppc/kernel/idle_power4.S
deleted file mode 100644
index cc0d535365cd..000000000000
--- a/arch/ppc/kernel/idle_power4.S
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * This file contains the power_save function for 6xx & 7xxx CPUs
- * rewritten in assembler
- *
- * Warning ! This code assumes that if your machine has a 750fx
- * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
- * if this is not the case some additional changes will have to
- * be done to check a runtime var (a bit like powersave-nap)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/threads.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/cputable.h>
-#include <asm/thread_info.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-
-#undef DEBUG
-
-	.text
-
-/*
- * Init idle, called at early CPU setup time from head.S for each CPU
- * So nothing for now. Called with r24 containing CPU number and r3
- * reloc offset
- */
-	.globl	init_idle_power4
-init_idle_power4:
-	blr
-
-/*
- * Here is the power_save_6xx function. This could eventually be
- * split into several functions & changing the function pointer
- * depending on the various features.
- */
-	.globl	power4_idle
-power4_idle:
-BEGIN_FTR_SECTION
-	blr
-END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
-	/* We must dynamically check for the NAP feature as it
-	 * can be cleared by CPU init after the fixups are done
-	 */
-	lis	r4,cur_cpu_spec@ha
-	lwz	r4,cur_cpu_spec@l(r4)
-	lwz	r4,CPU_SPEC_FEATURES(r4)
-	andi.	r0,r4,CPU_FTR_CAN_NAP
-	beqlr
-	/* Now check if user or arch enabled NAP mode */
-	lis	r4,powersave_nap@ha
-	lwz	r4,powersave_nap@l(r4)
-	cmpwi	0,r4,0
-	beqlr
-
-	/* Clear MSR:EE */
-	mfmsr	r7
-	rlwinm	r0,r7,0,17,15
-	mtmsr	r0
-
-	/* Check current_thread_info()->flags */
-	rlwinm	r4,r1,0,0,18
-	lwz	r4,TI_FLAGS(r4)
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	beq	1f
-	mtmsr	r7	/* out of line this ? */
-	blr
-1:
-	/* Go to NAP now */
-BEGIN_FTR_SECTION
-	DSSALL
-	sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-	ori	r7,r7,MSR_EE	/* Could be ommited (already set) */
-	oris	r7,r7,MSR_POW@h
-	sync
-	isync
-	mtmsr	r7
-	isync
-	sync
-	blr
-
-	.globl	powersave_nap
-powersave_nap:
-	.long	0
diff --git a/arch/ppc/kernel/l2cr.S b/arch/ppc/kernel/l2cr.S
deleted file mode 100644
index d7f4e982b539..000000000000
--- a/arch/ppc/kernel/l2cr.S
+++ /dev/null
@@ -1,471 +0,0 @@
1/*
2 L2CR functions
3 Copyright © 1997-1998 by PowerLogix R & D, Inc.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18*/
19/*
20 Thur, Dec. 12, 1998.
21 - First public release, contributed by PowerLogix.
22 ***********
23 Sat, Aug. 7, 1999.
24 - Terry: Made sure code disabled interrupts before running. (Previously
25 it was assumed interrupts were already disabled).
26 - Terry: Updated for tentative G4 support. 4MB of memory is now flushed
27 instead of 2MB. (Prob. only 3 is necessary).
28 - Terry: Updated for workaround to HID0[DPM] processor bug
29 during global invalidates.
30 ***********
31 Thu, July 13, 2000.
32 - Terry: Added isync to correct for an errata.
33
34 22 August 2001.
35 - DanM: Finally added the 7450 patch I've had for the past
36 several months. The L2CR is similar, but I'm going
37 to assume the user of this functions knows what they
38 are doing.
39
40 Author: Terry Greeniaus (tgree@phys.ualberta.ca)
41 Please e-mail updates to this file to me, thanks!
42*/
43#include <linux/config.h>
44#include <asm/processor.h>
45#include <asm/cputable.h>
46#include <asm/ppc_asm.h>
47#include <asm/cache.h>
48#include <asm/page.h>
49
50/* Usage:
51
52 When setting the L2CR register, you must do a few special
53 things. If you are enabling the cache, you must perform a
54 global invalidate. If you are disabling the cache, you must
55 flush the cache contents first. This routine takes care of
56 doing these things. When first enabling the cache, make sure
57 you pass in the L2CR you want, as well as passing in the
58 global invalidate bit set. A global invalidate will only be
59 performed if the L2I bit is set in applyThis. When enabling
60 the cache, you should also set the L2E bit in applyThis. If
61 you want to modify the L2CR contents after the cache has been
62 enabled, the recommended procedure is to first call
63 __setL2CR(0) to disable the cache and then call it again with
64 the new values for L2CR. Examples:
65
66 _setL2CR(0) - disables the cache
67 _setL2CR(0xB3A04000) - enables my G3 upgrade card:
68 - L2E set to turn on the cache
69 - L2SIZ set to 1MB
70 - L2CLK set to 1:1
71 - L2RAM set to pipelined synchronous late-write
72 - L2I set to perform a global invalidation
73 - L2OH set to 0.5 nS
74 - L2DF set because this upgrade card
75 requires it
76
77 A similar call should work for your card. You need to know
78 the correct setting for your card and then place them in the
79 fields I have outlined above. Other fields support optional
80 features, such as L2DO which caches only data, or L2TS which
81 causes cache pushes from the L1 cache to go to the L2 cache
82 instead of to main memory.
83
84IMPORTANT:
85 Starting with the 7450, the bits in this register have moved
86 or behave differently. The Enable, Parity Enable, Size,
87 and L2 Invalidate are the only bits that have not moved.
88 The size is read-only for these processors with internal L2
89 cache, and the invalidate is a control as well as status.
90 -- Dan
91
92*/
93/*
94 * Summary: this procedure ignores the L2I bit in the value passed in,
95 * flushes the cache if it was already enabled, always invalidates the
96 * cache, then enables the cache if the L2E bit is set in the value
97 * passed in.
98 * -- paulus.
99 */
100_GLOBAL(_set_L2CR)
101 /* Make sure this is a 750 or 7400 chip */
102BEGIN_FTR_SECTION
103 li r3,-1
104 blr
105END_FTR_SECTION_IFCLR(CPU_FTR_L2CR)
106
107 mflr r9
108
109 /* Stop DST streams */
110BEGIN_FTR_SECTION
111 DSSALL
112 sync
113END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
114
115 /* Turn off interrupts and data relocation. */
116 mfmsr r7 /* Save MSR in r7 */
117 rlwinm r4,r7,0,17,15
118 rlwinm r4,r4,0,28,26 /* Turn off DR bit */
119 sync
120 mtmsr r4
121 isync
122
123 /* Before we perform the global invalidation, we must disable dynamic
124 * power management via HID0[DPM] to work around a processor bug where
125 * DPM can possibly interfere with the state machine in the processor
126 * that invalidates the L2 cache tags.
127 */
128 mfspr r8,SPRN_HID0 /* Save HID0 in r8 */
129 rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */
130 sync
131 mtspr SPRN_HID0,r4 /* Disable DPM */
132 sync
133
134 /* Get the current enable bit of the L2CR into r4 */
135 mfspr r4,SPRN_L2CR
136
137 /* Tweak some bits */
138 rlwinm r5,r3,0,0,0 /* r5 contains the new enable bit */
139 rlwinm r3,r3,0,11,9 /* Turn off the invalidate bit */
140 rlwinm r3,r3,0,1,31 /* Turn off the enable bit */
141
142 /* Check to see if we need to flush */
143 rlwinm. r4,r4,0,0,0
144 beq 2f
145
146 /* Flush the cache. First, read the first 4MB of memory (physical) to
147 * put new data in the cache. (Actually we only need
148 * the size of the L2 cache plus the size of the L1 cache, but 4MB will
149 * cover everything just to be safe).
150 */
151
152 /**** Might be a good idea to set L2DO here - to prevent instructions
153 from getting into the cache. But since we invalidate
154 the next time we enable the cache it doesn't really matter.
155 Don't do this unless you accomodate all processor variations.
156 The bit moved on the 7450.....
157 ****/
158
159BEGIN_FTR_SECTION
160 /* Disable L2 prefetch on some 745x and try to ensure
161 * L2 prefetch engines are idle. As explained by errata
162 * text, we can't be sure they are, we just hope very hard
163 * that well be enough (sic !). At least I noticed Apple
164 * doesn't even bother doing the dcbf's here...
165 */
166 mfspr r4,SPRN_MSSCR0
167 rlwinm r4,r4,0,0,29
168 sync
169 mtspr SPRN_MSSCR0,r4
170 sync
171 isync
172 lis r4,KERNELBASE@h
173 dcbf 0,r4
174 dcbf 0,r4
175 dcbf 0,r4
176 dcbf 0,r4
177END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
178
179 /* TODO: use HW flush assist when available */
180
181 lis r4,0x0002
182 mtctr r4
183 li r4,0
1841:
185 lwzx r0,r0,r4
186 addi r4,r4,32 /* Go to start of next cache line */
187 bdnz 1b
188 isync
189
190 /* Now, flush the first 4MB of memory */
191 lis r4,0x0002
192 mtctr r4
193 li r4,0
194 sync
1951:
196 dcbf 0,r4
197 addi r4,r4,32 /* Go to start of next cache line */
198 bdnz 1b
199
2002:
201 /* Set up the L2CR configuration bits (and switch L2 off) */
202 /* CPU errata: Make sure the mtspr below is already in the
203 * L1 icache
204 */
205 b 20f
206 .balign L1_CACHE_BYTES
20722:
208 sync
209 mtspr SPRN_L2CR,r3
210 sync
211 b 23f
21220:
213 b 21f
21421: sync
215 isync
216 b 22b
217
21823:
219 /* Perform a global invalidation */
220 oris r3,r3,0x0020
221 sync
222 mtspr SPRN_L2CR,r3
223 sync
224 isync /* For errata */
225
226BEGIN_FTR_SECTION
227 /* On the 7450, we wait for the L2I bit to clear......
228 */
22910: mfspr r3,SPRN_L2CR
230 andis. r4,r3,0x0020
231 bne 10b
232 b 11f
233END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
234
235 /* Wait for the invalidation to complete */
2363: mfspr r3,SPRN_L2CR
237 rlwinm. r4,r3,0,31,31
238 bne 3b
239
24011: rlwinm r3,r3,0,11,9 /* Turn off the L2I bit */
241 sync
242 mtspr SPRN_L2CR,r3
243 sync
244
245 /* See if we need to enable the cache */
246 cmplwi r5,0
247 beq 4f
248
249 /* Enable the cache */
250 oris r3,r3,0x8000
251 mtspr SPRN_L2CR,r3
252 sync
253
254 /* Enable L2 HW prefetch on 744x/745x */
255BEGIN_FTR_SECTION
256 mfspr r3,SPRN_MSSCR0
257 ori r3,r3,3
258 sync
259 mtspr SPRN_MSSCR0,r3
260 sync
261 isync
262END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
2634:
264
265 /* Restore HID0[DPM] to whatever it was before */
266 sync
267 mtspr 1008,r8
268 sync
269
270 /* Restore MSR (restores EE and DR bits to original state) */
271 SYNC
272 mtmsr r7
273 isync
274
275 mtlr r9
276 blr
277
278_GLOBAL(_get_L2CR)
279 /* Return the L2CR contents */
280 li r3,0
281BEGIN_FTR_SECTION
282 mfspr r3,SPRN_L2CR
283END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
284 blr
285
286
287/*
288 * Here is a similar routine for dealing with the L3 cache
289 * on the 745x family of chips
290 */
291
292_GLOBAL(_set_L3CR)
293 /* Make sure this is a 745x chip */
294BEGIN_FTR_SECTION
295 li r3,-1
296 blr
297END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)
298
299 /* Turn off interrupts and data relocation. */
300 mfmsr r7 /* Save MSR in r7 */
301 rlwinm r4,r7,0,17,15
302 rlwinm r4,r4,0,28,26 /* Turn off DR bit */
303 sync
304 mtmsr r4
305 isync
306
307 /* Stop DST streams */
308 DSSALL
309 sync
310
311 /* Get the current enable bit of the L3CR into r4 */
312 mfspr r4,SPRN_L3CR
313
314 /* Tweak some bits */
315 rlwinm r5,r3,0,0,0 /* r5 contains the new enable bit */
316 rlwinm r3,r3,0,22,20 /* Turn off the invalidate bit */
317 rlwinm r3,r3,0,2,31 /* Turn off the enable & PE bits */
318 rlwinm r3,r3,0,5,3 /* Turn off the clken bit */
319 /* Check to see if we need to flush */
320 rlwinm. r4,r4,0,0,0
321 beq 2f
322
323 /* Flush the cache.
324 */
325
326 /* TODO: use HW flush assist */
327
328 lis r4,0x0008
329 mtctr r4
330 li r4,0
3311:
332 lwzx r0,r0,r4
333 dcbf 0,r4
334 addi r4,r4,32 /* Go to start of next cache line */
335 bdnz 1b
336
3372:
338 /* Set up the L3CR configuration bits (and switch L3 off) */
339 sync
340 mtspr SPRN_L3CR,r3
341 sync
342
343 oris r3,r3,L3CR_L3RES@h /* Set reserved bit 5 */
344 mtspr SPRN_L3CR,r3
345 sync
346 oris r3,r3,L3CR_L3CLKEN@h /* Set clken */
347 mtspr SPRN_L3CR,r3
348 sync
349
350 /* Wait for stabilize */
351 li r0,256
352 mtctr r0
3531: bdnz 1b
354
355 /* Perform a global invalidation */
356 ori r3,r3,0x0400
357 sync
358 mtspr SPRN_L3CR,r3
359 sync
360 isync
361
362 /* We wait for the L3I bit to clear...... */
36310: mfspr r3,SPRN_L3CR
364 andi. r4,r3,0x0400
365 bne 10b
366
367 /* Clear CLKEN */
368 rlwinm r3,r3,0,5,3 /* Turn off the clken bit */
369 mtspr SPRN_L3CR,r3
370 sync
371
372 /* Wait for stabilize */
373 li r0,256
374 mtctr r0
3751: bdnz 1b
376
377 /* See if we need to enable the cache */
378 cmplwi r5,0
379 beq 4f
380
381 /* Enable the cache */
382 oris r3,r3,(L3CR_L3E | L3CR_L3CLKEN)@h
383 mtspr SPRN_L3CR,r3
384 sync
385
386 /* Wait for stabilize */
387 li r0,256
388 mtctr r0
3891: bdnz 1b
390
391 /* Restore MSR (restores EE and DR bits to original state) */
3924: SYNC
393 mtmsr r7
394 isync
395 blr
396
397_GLOBAL(_get_L3CR)
398 /* Return the L3CR contents */
399 li r3,0
400BEGIN_FTR_SECTION
401 mfspr r3,SPRN_L3CR
402END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
403 blr
404
405/* --- End of PowerLogix code ---
406 */
407
408
409/* flush_disable_L1() - Flush and disable L1 cache
410 *
411 * clobbers r0, r3, ctr, cr0
412 * Must be called with interrupts disabled and MMU enabled.
413 */
414_GLOBAL(__flush_disable_L1)
415	/* Stop pending altivec streams and memory accesses */
416BEGIN_FTR_SECTION
417 DSSALL
418END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
419 sync
420
421	/* Load the counter with 0x4000 cache lines (512kB) and
422	 * fill the cache with data
423 */
424 li r3,0x4000 /* 512kB / 32B */
425 mtctr r3
426 lis r3,KERNELBASE@h
4271:
428 lwz r0,0(r3)
429 addi r3,r3,0x0020 /* Go to start of next cache line */
430 bdnz 1b
431 isync
432 sync
433
434 /* Now flush those cache lines */
435 li r3,0x4000 /* 512kB / 32B */
436 mtctr r3
437 lis r3,KERNELBASE@h
4381:
439 dcbf 0,r3
440 addi r3,r3,0x0020 /* Go to start of next cache line */
441 bdnz 1b
442 sync
443
444 /* We can now disable the L1 cache (HID0:DCE, HID0:ICE) */
445 mfspr r3,SPRN_HID0
446 rlwinm r3,r3,0,18,15
447 mtspr SPRN_HID0,r3
448 sync
449 isync
450 blr
451
452/* __inval_enable_L1 - Invalidate and enable the L1 cache
453 *
454 * Assumes L1 is already disabled and MSR:EE is off
455 *
456 * clobbers r3
457 */
458_GLOBAL(__inval_enable_L1)
459	/* Enable and then flash-invalidate the instruction & data caches */
460 mfspr r3,SPRN_HID0
461 ori r3,r3, HID0_ICE|HID0_ICFI|HID0_DCE|HID0_DCI
462 sync
463 isync
464 mtspr SPRN_HID0,r3
465 xori r3,r3, HID0_ICFI|HID0_DCI
466 mtspr SPRN_HID0,r3
467 sync
468
469 blr
470
471
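
__flush_disable_L1 above has no hardware flush assist, so it flushes by displacement: it loads a 512kB span, far larger than the L1, so every dirty line gets evicted or reloaded clean, then runs dcbf over the same span before turning the cache off. A hedged C analogue of the pattern; base and the dcbf_line callback are stand-ins for KERNELBASE and the dcbf instruction:

    #include <stdint.h>

    #define CACHE_LINE 32                     /* 6xx L1 line size in bytes */
    #define FLUSH_SPAN (0x4000 * CACHE_LINE)  /* 512kB, as in the code above */

    /* Touch a span large enough to displace every L1 data cache line,
     * then flush each line explicitly. Sketch only. */
    static void flush_l1_by_displacement(volatile uint8_t *base,
                                         void (*dcbf_line)(volatile void *))
    {
        uint32_t off;

        /* Pass 1: load every line in the span; dirty victim lines
         * are written back as they are displaced. */
        for (off = 0; off < FLUSH_SPAN; off += CACHE_LINE)
            (void)base[off];

        /* Pass 2: flush the lines we just loaded so the cache can
         * be disabled with nothing left dirty. */
        for (off = 0; off < FLUSH_SPAN; off += CACHE_LINE)
            dcbf_line(base + off);
    }
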
diff --git a/arch/ppc/kernel/module.c b/arch/ppc/kernel/module.c
deleted file mode 100644
index 92f4e5f64f02..000000000000
--- a/arch/ppc/kernel/module.c
+++ /dev/null
@@ -1,320 +0,0 @@
1/* Kernel module help for PPC.
2 Copyright (C) 2001 Rusty Russell.
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17*/
18#include <linux/module.h>
19#include <linux/moduleloader.h>
20#include <linux/elf.h>
21#include <linux/vmalloc.h>
22#include <linux/fs.h>
23#include <linux/string.h>
24#include <linux/kernel.h>
25#include <linux/cache.h>
26
27#if 0
28#define DEBUGP printk
29#else
30#define DEBUGP(fmt , ...)
31#endif
32
33LIST_HEAD(module_bug_list);
34
35void *module_alloc(unsigned long size)
36{
37 if (size == 0)
38 return NULL;
39 return vmalloc(size);
40}
41
42/* Free memory returned from module_alloc */
43void module_free(struct module *mod, void *module_region)
44{
45 vfree(module_region);
46 /* FIXME: If module_region == mod->init_region, trim exception
47 table entries. */
48}
49
50/* Count how many different relocations there are (different symbol,
51 different addend) */
52static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
53{
54 unsigned int i, j, ret = 0;
55
56 /* Sure, this is order(n^2), but it's usually short, and not
57 time critical */
58 for (i = 0; i < num; i++) {
59 for (j = 0; j < i; j++) {
60 /* If this addend appeared before, it's
61 already been counted */
62 if (ELF32_R_SYM(rela[i].r_info)
63 == ELF32_R_SYM(rela[j].r_info)
64 && rela[i].r_addend == rela[j].r_addend)
65 break;
66 }
67 if (j == i) ret++;
68 }
69 return ret;
70}
71
72/* Get the size potentially required for PLT trampolines by the init and
73 non-init sections */
74static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
75 const Elf32_Shdr *sechdrs,
76 const char *secstrings,
77 int is_init)
78{
79 unsigned long ret = 0;
80 unsigned i;
81
82 /* Everything marked ALLOC (this includes the exported
83 symbols) */
84 for (i = 1; i < hdr->e_shnum; i++) {
85 /* If it's called *.init*, and we're not init, we're
86 not interested */
87 if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0)
88 != is_init)
89 continue;
90
91 /* We don't want to look at debug sections. */
92 if (strstr(secstrings + sechdrs[i].sh_name, ".debug") != 0)
93 continue;
94
95 if (sechdrs[i].sh_type == SHT_RELA) {
96 DEBUGP("Found relocations in section %u\n", i);
97 DEBUGP("Ptr: %p. Number: %u\n",
98 (void *)hdr + sechdrs[i].sh_offset,
99 sechdrs[i].sh_size / sizeof(Elf32_Rela));
100 ret += count_relocs((void *)hdr
101 + sechdrs[i].sh_offset,
102 sechdrs[i].sh_size
103 / sizeof(Elf32_Rela))
104 * sizeof(struct ppc_plt_entry);
105 }
106 }
107
108 return ret;
109}
110
111int module_frob_arch_sections(Elf32_Ehdr *hdr,
112 Elf32_Shdr *sechdrs,
113 char *secstrings,
114 struct module *me)
115{
116 unsigned int i;
117
118 /* Find .plt and .init.plt sections */
119 for (i = 0; i < hdr->e_shnum; i++) {
120 if (strcmp(secstrings + sechdrs[i].sh_name, ".init.plt") == 0)
121 me->arch.init_plt_section = i;
122 else if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0)
123 me->arch.core_plt_section = i;
124 }
125 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
126 printk("Module doesn't contain .plt or .init.plt sections.\n");
127 return -ENOEXEC;
128 }
129
130 /* Override their sizes */
131 sechdrs[me->arch.core_plt_section].sh_size
132 = get_plt_size(hdr, sechdrs, secstrings, 0);
133 sechdrs[me->arch.init_plt_section].sh_size
134 = get_plt_size(hdr, sechdrs, secstrings, 1);
135 return 0;
136}
137
138int apply_relocate(Elf32_Shdr *sechdrs,
139 const char *strtab,
140 unsigned int symindex,
141 unsigned int relsec,
142 struct module *module)
143{
144 printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n",
145 module->name);
146 return -ENOEXEC;
147}
148
149static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
150{
151 if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16)
152 && entry->jump[1] == 0x396b0000 + (val & 0xffff))
153 return 1;
154 return 0;
155}
156
157/* Set up a trampoline in the PLT to bounce us to the distant function */
158static uint32_t do_plt_call(void *location,
159 Elf32_Addr val,
160 Elf32_Shdr *sechdrs,
161 struct module *mod)
162{
163 struct ppc_plt_entry *entry;
164
165 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
166 /* Init, or core PLT? */
167 if (location >= mod->module_core
168 && location < mod->module_core + mod->core_size)
169 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
170 else
171 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
172
173	/* Find this entry, or failing that, the next available entry */
174 while (entry->jump[0]) {
175 if (entry_matches(entry, val)) return (uint32_t)entry;
176 entry++;
177 }
178
179 /* Stolen from Paul Mackerras as well... */
180 entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */
181 entry->jump[1] = 0x396b0000 + (val&0xffff); /* addi r11,r11,sym@l*/
182 entry->jump[2] = 0x7d6903a6; /* mtctr r11 */
183 entry->jump[3] = 0x4e800420; /* bctr */
184
185 DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
186 return (uint32_t)entry;
187}
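
The trampoline above materializes a 32-bit address in r11 with a lis/addi pair. Because addi sign-extends its 16-bit immediate, the high half must be pre-adjusted, which is what the (val + 0x8000) >> 16 in the opcode constants does. A small self-contained check of that split (sketch, not kernel code):

    #include <assert.h>
    #include <stdint.h>

    /* High/low split used by the lis/addi pair in the trampoline.
     * addi sign-extends its immediate, so the high half is rounded
     * up whenever the low half will come out negative. */
    static uint16_t ppc_ha(uint32_t v) { return (v + 0x8000) >> 16; }
    static uint16_t ppc_lo(uint32_t v) { return v & 0xffff; }

    int main(void)
    {
        uint32_t v = 0xc000fff0;    /* arbitrary target address */

        /* Reassemble the way the CPU does: lis places ha in the top
         * half, addi adds the sign-extended low half. */
        uint32_t r = ((uint32_t)ppc_ha(v) << 16) + (int16_t)ppc_lo(v);
        assert(r == v);
        return 0;
    }
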
188
189int apply_relocate_add(Elf32_Shdr *sechdrs,
190 const char *strtab,
191 unsigned int symindex,
192 unsigned int relsec,
193 struct module *module)
194{
195 unsigned int i;
196 Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
197 Elf32_Sym *sym;
198 uint32_t *location;
199 uint32_t value;
200
201 DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
202 sechdrs[relsec].sh_info);
203 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
204 /* This is where to make the change */
205 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
206 + rela[i].r_offset;
207 /* This is the symbol it is referring to. Note that all
208 undefined symbols have been resolved. */
209 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
210 + ELF32_R_SYM(rela[i].r_info);
211 /* `Everything is relative'. */
212 value = sym->st_value + rela[i].r_addend;
213
214 switch (ELF32_R_TYPE(rela[i].r_info)) {
215 case R_PPC_ADDR32:
216 /* Simply set it */
217 *(uint32_t *)location = value;
218 break;
219
220 case R_PPC_ADDR16_LO:
221 /* Low half of the symbol */
222 *(uint16_t *)location = value;
223 break;
224
225 case R_PPC_ADDR16_HA:
226			/* Sign-adjusted high 16 bits: the PPC ELF ABI says:
227 (((x >> 16) + ((x & 0x8000) ? 1 : 0))) & 0xFFFF.
228 This is the same, only sane.
229 */
230 *(uint16_t *)location = (value + 0x8000) >> 16;
231 break;
232
233 case R_PPC_REL24:
234 if ((int)(value - (uint32_t)location) < -0x02000000
235 || (int)(value - (uint32_t)location) >= 0x02000000)
236 value = do_plt_call(location, value,
237 sechdrs, module);
238
239 /* Only replace bits 2 through 26 */
240 DEBUGP("REL24 value = %08X. location = %08X\n",
241 value, (uint32_t)location);
242 DEBUGP("Location before: %08X.\n",
243 *(uint32_t *)location);
244 *(uint32_t *)location
245 = (*(uint32_t *)location & ~0x03fffffc)
246 | ((value - (uint32_t)location)
247 & 0x03fffffc);
248 DEBUGP("Location after: %08X.\n",
249 *(uint32_t *)location);
250 DEBUGP("ie. jump to %08X+%08X = %08X\n",
251 *(uint32_t *)location & 0x03fffffc,
252 (uint32_t)location,
253 (*(uint32_t *)location & 0x03fffffc)
254 + (uint32_t)location);
255 break;
256
257 case R_PPC_REL32:
258 /* 32-bit relative jump. */
259 *(uint32_t *)location = value - (uint32_t)location;
260 break;
261
262 default:
263 printk("%s: unknown ADD relocation: %u\n",
264 module->name,
265 ELF32_R_TYPE(rela[i].r_info));
266 return -ENOEXEC;
267 }
268 }
269 return 0;
270}
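
For R_PPC_REL24 above, the displacement must fit the signed 26-bit reach of a PPC b/bl instruction (within 32MB either way), and only bits 6-29 of the instruction word (mask 0x03fffffc) get replaced. A minimal sketch of that field patch, with invented helper names:

    #include <assert.h>
    #include <stdint.h>

    /* Patch the 24-bit branch displacement field of a PPC b/bl
     * instruction word, as in the R_PPC_REL24 case above. */
    static uint32_t patch_rel24(uint32_t insn, uint32_t location, uint32_t target)
    {
        int32_t delta = (int32_t)(target - location);

        /* Direct branches reach +/-32MB; beyond that the code above
         * redirects the call through a PLT trampoline first. */
        assert(delta >= -0x02000000 && delta < 0x02000000);
        /* Instructions are word-aligned, so the low 2 bits are clear. */
        assert((delta & 3) == 0);

        return (insn & ~0x03fffffc) | ((uint32_t)delta & 0x03fffffc);
    }

    int main(void)
    {
        /* 0x48000000 is an unconditional "b"; branch forward 0x1000. */
        assert(patch_rel24(0x48000000, 0xc0000000, 0xc0001000) == 0x48001000);
        return 0;
    }
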
271
272int module_finalize(const Elf_Ehdr *hdr,
273 const Elf_Shdr *sechdrs,
274 struct module *me)
275{
276 char *secstrings;
277 unsigned int i;
278
279 me->arch.bug_table = NULL;
280 me->arch.num_bugs = 0;
281
282 /* Find the __bug_table section, if present */
283 secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
284 for (i = 1; i < hdr->e_shnum; i++) {
285 if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
286 continue;
287 me->arch.bug_table = (void *) sechdrs[i].sh_addr;
288 me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
289 break;
290 }
291
292 /*
293 * Strictly speaking this should have a spinlock to protect against
294 * traversals, but since we only traverse on BUG()s, a spinlock
295 * could potentially lead to deadlock and thus be counter-productive.
296 */
297 list_add(&me->arch.bug_list, &module_bug_list);
298
299 return 0;
300}
301
302void module_arch_cleanup(struct module *mod)
303{
304 list_del(&mod->arch.bug_list);
305}
306
307struct bug_entry *module_find_bug(unsigned long bugaddr)
308{
309 struct mod_arch_specific *mod;
310 unsigned int i;
311 struct bug_entry *bug;
312
313 list_for_each_entry(mod, &module_bug_list, bug_list) {
314 bug = mod->bug_table;
315 for (i = 0; i < mod->num_bugs; ++i, ++bug)
316 if (bugaddr == bug->bug_addr)
317 return bug;
318 }
319 return NULL;
320}
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c
index 04d04c5bfdd0..809673a36f7a 100644
--- a/arch/ppc/kernel/pci.c
+++ b/arch/ppc/kernel/pci.c
@@ -46,9 +46,6 @@ static void pcibios_fixup_resources(struct pci_dev* dev);
46static void fixup_broken_pcnet32(struct pci_dev* dev); 46static void fixup_broken_pcnet32(struct pci_dev* dev);
47static int reparent_resources(struct resource *parent, struct resource *res); 47static int reparent_resources(struct resource *parent, struct resource *res);
48static void fixup_cpc710_pci64(struct pci_dev* dev); 48static void fixup_cpc710_pci64(struct pci_dev* dev);
49#ifdef CONFIG_PPC_OF
50static u8* pci_to_OF_bus_map;
51#endif
52 49
53/* By default, we don't re-assign bus numbers. 50/* By default, we don't re-assign bus numbers.
54 */ 51 */
@@ -625,406 +622,13 @@ pcibios_alloc_controller(void)
625 return hose; 622 return hose;
626} 623}
627 624
628#ifdef CONFIG_PPC_OF
629/*
630 * Functions below are used on OpenFirmware machines.
631 */
632static void
633make_one_node_map(struct device_node* node, u8 pci_bus)
634{
635 int *bus_range;
636 int len;
637
638 if (pci_bus >= pci_bus_count)
639 return;
640 bus_range = (int *) get_property(node, "bus-range", &len);
641 if (bus_range == NULL || len < 2 * sizeof(int)) {
642 printk(KERN_WARNING "Can't get bus-range for %s, "
643 "assuming it starts at 0\n", node->full_name);
644 pci_to_OF_bus_map[pci_bus] = 0;
645 } else
646 pci_to_OF_bus_map[pci_bus] = bus_range[0];
647
648 for (node=node->child; node != 0;node = node->sibling) {
649 struct pci_dev* dev;
650 unsigned int *class_code, *reg;
651
652 class_code = (unsigned int *) get_property(node, "class-code", NULL);
653 if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
654 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
655 continue;
656 reg = (unsigned int *)get_property(node, "reg", NULL);
657 if (!reg)
658 continue;
659 dev = pci_find_slot(pci_bus, ((reg[0] >> 8) & 0xff));
660 if (!dev || !dev->subordinate)
661 continue;
662 make_one_node_map(node, dev->subordinate->number);
663 }
664}
665
666void
667pcibios_make_OF_bus_map(void)
668{
669 int i;
670 struct pci_controller* hose;
671 u8* of_prop_map;
672
673 pci_to_OF_bus_map = (u8*)kmalloc(pci_bus_count, GFP_KERNEL);
674 if (!pci_to_OF_bus_map) {
675 printk(KERN_ERR "Can't allocate OF bus map !\n");
676 return;
677 }
678
679	/* We fill the bus map with invalid values, which helps
680 * debugging.
681 */
682 for (i=0; i<pci_bus_count; i++)
683 pci_to_OF_bus_map[i] = 0xff;
684
685 /* For each hose, we begin searching bridges */
686 for(hose=hose_head; hose; hose=hose->next) {
687 struct device_node* node;
688 node = (struct device_node *)hose->arch_data;
689 if (!node)
690 continue;
691 make_one_node_map(node, hose->first_busno);
692 }
693 of_prop_map = get_property(find_path_device("/"), "pci-OF-bus-map", NULL);
694 if (of_prop_map)
695 memcpy(of_prop_map, pci_to_OF_bus_map, pci_bus_count);
696#ifdef DEBUG
697 printk("PCI->OF bus map:\n");
698 for (i=0; i<pci_bus_count; i++) {
699 if (pci_to_OF_bus_map[i] == 0xff)
700 continue;
701 printk("%d -> %d\n", i, pci_to_OF_bus_map[i]);
702 }
703#endif
704}
705
706typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
707
708static struct device_node*
709scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data)
710{
711 struct device_node* sub_node;
712
713 for (; node != 0;node = node->sibling) {
714 unsigned int *class_code;
715
716 if (filter(node, data))
717 return node;
718
719		/* For PCI<->PCI bridges or CardBus bridges, we go down.
720		 * Note: some OFs create a parent node "multifunc-device" as
721		 * a fake root for all functions of a multi-function device;
722		 * we go down those as well.
723 */
724 class_code = (unsigned int *) get_property(node, "class-code", NULL);
725 if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
726 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
727 strcmp(node->name, "multifunc-device"))
728 continue;
729 sub_node = scan_OF_pci_childs(node->child, filter, data);
730 if (sub_node)
731 return sub_node;
732 }
733 return NULL;
734}
735
736static int
737scan_OF_pci_childs_iterator(struct device_node* node, void* data)
738{
739 unsigned int *reg;
740 u8* fdata = (u8*)data;
741
742 reg = (unsigned int *) get_property(node, "reg", NULL);
743 if (reg && ((reg[0] >> 8) & 0xff) == fdata[1]
744 && ((reg[0] >> 16) & 0xff) == fdata[0])
745 return 1;
746 return 0;
747}
748
749static struct device_node*
750scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
751{
752 u8 filter_data[2] = {bus, dev_fn};
753
754 return scan_OF_pci_childs(node, scan_OF_pci_childs_iterator, filter_data);
755}
756
757/*
758 * Scans the OF tree for a device node matching a PCI device
759 */
760struct device_node *
761pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
762{
763 struct pci_controller *hose;
764 struct device_node *node;
765 int busnr;
766
767 if (!have_of)
768 return NULL;
769
770 /* Lookup the hose */
771 busnr = bus->number;
772 hose = pci_bus_to_hose(busnr);
773 if (!hose)
774 return NULL;
775
776 /* Check it has an OF node associated */
777 node = (struct device_node *) hose->arch_data;
778 if (!node)
779 return NULL;
780
781	/* Fix up the bus number according to what OF thinks it is. */
782 if (pci_to_OF_bus_map)
783 busnr = pci_to_OF_bus_map[busnr];
784 if (busnr == 0xff)
785 return NULL;
786
787	/* Now, look up the children of the hose */
788 return scan_OF_childs_for_device(node->child, busnr, devfn);
789}
790EXPORT_SYMBOL(pci_busdev_to_OF_node);
791
792struct device_node*
793pci_device_to_OF_node(struct pci_dev *dev)
794{
795 return pci_busdev_to_OF_node(dev->bus, dev->devfn);
796}
797EXPORT_SYMBOL(pci_device_to_OF_node);
798
799/* This routine is meant to be used early during boot, when the
800 * PCI bus numbers have not yet been assigned, and you need to
801 * issue PCI config cycles to an OF device.
802 * It could also be used to "fix" RTAS config cycles if you want
803 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
804 * config cycles.
805 */
806struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
807{
808 if (!have_of)
809 return NULL;
810 while(node) {
811 struct pci_controller* hose;
812 for (hose=hose_head;hose;hose=hose->next)
813 if (hose->arch_data == node)
814 return hose;
815 node=node->parent;
816 }
817 return NULL;
818}
819
820static int
821find_OF_pci_device_filter(struct device_node* node, void* data)
822{
823 return ((void *)node == data);
824}
825
826/*
827 * Returns the PCI device matching a given OF node
828 */
829int
830pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
831{
832 unsigned int *reg;
833 struct pci_controller* hose;
834 struct pci_dev* dev = NULL;
835
836 if (!have_of)
837 return -ENODEV;
838 /* Make sure it's really a PCI device */
839 hose = pci_find_hose_for_OF_device(node);
840 if (!hose || !hose->arch_data)
841 return -ENODEV;
842 if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child,
843 find_OF_pci_device_filter, (void *)node))
844 return -ENODEV;
845 reg = (unsigned int *) get_property(node, "reg", NULL);
846 if (!reg)
847 return -ENODEV;
848 *bus = (reg[0] >> 16) & 0xff;
849 *devfn = ((reg[0] >> 8) & 0xff);
850
851	/* Ok, here we need a tweak. If we have already renumbered
852	 * all busses, we can't rely on the OF bus number any more.
853	 * The pci_to_OF_bus_map is not enough, as several PCI busses
854 * may match the same OF bus number.
855 */
856 if (!pci_to_OF_bus_map)
857 return 0;
858
859 for_each_pci_dev(dev)
860 if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
861 dev->devfn == *devfn) {
862 *bus = dev->bus->number;
863 pci_dev_put(dev);
864 return 0;
865 }
866
867 return -ENODEV;
868}
869EXPORT_SYMBOL(pci_device_from_OF_node);
870
871void __init
872pci_process_bridge_OF_ranges(struct pci_controller *hose,
873 struct device_node *dev, int primary)
874{
875 static unsigned int static_lc_ranges[256] __initdata;
876 unsigned int *dt_ranges, *lc_ranges, *ranges, *prev;
877 unsigned int size;
878 int rlen = 0, orig_rlen;
879 int memno = 0;
880 struct resource *res;
881 int np, na = prom_n_addr_cells(dev);
882 np = na + 5;
883
884 /* First we try to merge ranges to fix a problem with some pmacs
885 * that can have more than 3 ranges, fortunately using contiguous
886 * addresses -- BenH
887 */
888 dt_ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
889 if (!dt_ranges)
890 return;
891 /* Sanity check, though hopefully that never happens */
892 if (rlen > sizeof(static_lc_ranges)) {
893 printk(KERN_WARNING "OF ranges property too large !\n");
894 rlen = sizeof(static_lc_ranges);
895 }
896 lc_ranges = static_lc_ranges;
897 memcpy(lc_ranges, dt_ranges, rlen);
898 orig_rlen = rlen;
899
900 /* Let's work on a copy of the "ranges" property instead of damaging
901 * the device-tree image in memory
902 */
903 ranges = lc_ranges;
904 prev = NULL;
905 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
906 if (prev) {
907 if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
908 (prev[2] + prev[na+4]) == ranges[2] &&
909 (prev[na+2] + prev[na+4]) == ranges[na+2]) {
910 prev[na+4] += ranges[na+4];
911 ranges[0] = 0;
912 ranges += np;
913 continue;
914 }
915 }
916 prev = ranges;
917 ranges += np;
918 }
919
920 /*
921 * The ranges property is laid out as an array of elements,
922 * each of which comprises:
923 * cells 0 - 2: a PCI address
924	 * cell 3 (or cells 3-4): a CPU physical address
925	 * (width depends on prom_n_addr_cells(dev))
926	 * the last two cells: the size of the range
927 */
928 ranges = lc_ranges;
929 rlen = orig_rlen;
930 while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
931 res = NULL;
932 size = ranges[na+4];
933 switch ((ranges[0] >> 24) & 0x3) {
934 case 1: /* I/O space */
935 if (ranges[2] != 0)
936 break;
937 hose->io_base_phys = ranges[na+2];
938 /* limit I/O space to 16MB */
939 if (size > 0x01000000)
940 size = 0x01000000;
941 hose->io_base_virt = ioremap(ranges[na+2], size);
942 if (primary)
943 isa_io_base = (unsigned long) hose->io_base_virt;
944 res = &hose->io_resource;
945 res->flags = IORESOURCE_IO;
946 res->start = ranges[2];
947 DBG("PCI: IO 0x%lx -> 0x%lx\n",
948 res->start, res->start + size - 1);
949 break;
950 case 2: /* memory space */
951 memno = 0;
952 if (ranges[1] == 0 && ranges[2] == 0
953 && ranges[na+4] <= (16 << 20)) {
954 /* 1st 16MB, i.e. ISA memory area */
955 if (primary)
956 isa_mem_base = ranges[na+2];
957 memno = 1;
958 }
959 while (memno < 3 && hose->mem_resources[memno].flags)
960 ++memno;
961 if (memno == 0)
962 hose->pci_mem_offset = ranges[na+2] - ranges[2];
963 if (memno < 3) {
964 res = &hose->mem_resources[memno];
965 res->flags = IORESOURCE_MEM;
966 if(ranges[0] & 0x40000000)
967 res->flags |= IORESOURCE_PREFETCH;
968 res->start = ranges[na+2];
969 DBG("PCI: MEM[%d] 0x%lx -> 0x%lx\n", memno,
970 res->start, res->start + size - 1);
971 }
972 break;
973 }
974 if (res != NULL) {
975 res->name = dev->full_name;
976 res->end = res->start + size - 1;
977 res->parent = NULL;
978 res->sibling = NULL;
979 res->child = NULL;
980 }
981 ranges += np;
982 }
983}
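
For reference, each element walked by pci_process_bridge_OF_ranges above is np = 3 + na + 2 cells long: a 3-cell PCI address, an na-cell CPU physical address (na = prom_n_addr_cells(dev)), and a 2-cell size. The decoder below is a hypothetical standalone sketch of that layout; the struct and function names are invented:

    #include <stdint.h>

    /* One decoded element of an OF PCI "ranges" property. */
    struct pci_range {
        unsigned int space;     /* 0 config, 1 I/O, 2 mem32, 3 mem64 */
        int prefetchable;
        uint64_t pci_addr;
        uint64_t cpu_addr;
        uint64_t size;
    };

    static void decode_range(const uint32_t *cells, int na, struct pci_range *r)
    {
        r->space = (cells[0] >> 24) & 0x3;
        r->prefetchable = (cells[0] & 0x40000000) != 0;
        r->pci_addr = ((uint64_t)cells[1] << 32) | cells[2];
        /* CPU address occupies cells 3 .. 3+na-1 (na is 1 or 2). */
        r->cpu_addr = (na == 2) ? (((uint64_t)cells[3] << 32) | cells[4])
                                : cells[3];
        /* The size is always the last two cells. */
        r->size = ((uint64_t)cells[3 + na] << 32) | cells[3 + na + 1];
    }
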
984
985/* We create the "pci-OF-bus-map" property now so it appears in the
986 * /proc device tree
987 */
988void __init
989pci_create_OF_bus_map(void)
990{
991 struct property* of_prop;
992
993 of_prop = (struct property*) alloc_bootmem(sizeof(struct property) + 256);
994 if (of_prop && find_path_device("/")) {
995 memset(of_prop, -1, sizeof(struct property) + 256);
996 of_prop->name = "pci-OF-bus-map";
997 of_prop->length = 256;
998 of_prop->value = (unsigned char *)&of_prop[1];
999 prom_add_property(find_path_device("/"), of_prop);
1000 }
1001}
1002
1003static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
1004{
1005 struct pci_dev *pdev;
1006 struct device_node *np;
1007
1008 pdev = to_pci_dev (dev);
1009 np = pci_device_to_OF_node(pdev);
1010 if (np == NULL || np->full_name == NULL)
1011 return 0;
1012 return sprintf(buf, "%s", np->full_name);
1013}
1014static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
1015
1016#else /* CONFIG_PPC_OF */
1017void pcibios_make_OF_bus_map(void) 625void pcibios_make_OF_bus_map(void)
1018{ 626{
1019} 627}
1020#endif /* CONFIG_PPC_OF */
1021 628
1022/* Add sysfs properties */ 629/* Add sysfs properties */
1023void pcibios_add_platform_entries(struct pci_dev *pdev) 630void pcibios_add_platform_entries(struct pci_dev *pdev)
1024{ 631{
1025#ifdef CONFIG_PPC_OF
1026 device_create_file(&pdev->dev, &dev_attr_devspec);
1027#endif /* CONFIG_PPC_OF */
1028} 632}
1029 633
1030 634
diff --git a/arch/ppc/kernel/perfmon_fsl_booke.c b/arch/ppc/kernel/perfmon_fsl_booke.c
deleted file mode 100644
index 32455dfcc36b..000000000000
--- a/arch/ppc/kernel/perfmon_fsl_booke.c
+++ /dev/null
@@ -1,222 +0,0 @@
1/* kernel/perfmon_fsl_booke.c
2 * Freescale Book-E Performance Monitor code
3 *
4 * Author: Andy Fleming
5 * Copyright (c) 2004 Freescale Semiconductor, Inc
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/kernel.h>
16#include <linux/mm.h>
17#include <linux/stddef.h>
18#include <linux/unistd.h>
19#include <linux/ptrace.h>
20#include <linux/slab.h>
21#include <linux/user.h>
22#include <linux/a.out.h>
23#include <linux/interrupt.h>
24#include <linux/config.h>
25#include <linux/init.h>
26#include <linux/module.h>
27#include <linux/prctl.h>
28
29#include <asm/pgtable.h>
30#include <asm/uaccess.h>
31#include <asm/system.h>
32#include <asm/io.h>
33#include <asm/reg.h>
34#include <asm/xmon.h>
35#include <asm/pmc.h>
36
37static inline u32 get_pmlca(int ctr);
38static inline void set_pmlca(int ctr, u32 pmlca);
39
40static inline u32 get_pmlca(int ctr)
41{
42 u32 pmlca;
43
44 switch (ctr) {
45 case 0:
46 pmlca = mfpmr(PMRN_PMLCA0);
47 break;
48 case 1:
49 pmlca = mfpmr(PMRN_PMLCA1);
50 break;
51 case 2:
52 pmlca = mfpmr(PMRN_PMLCA2);
53 break;
54 case 3:
55 pmlca = mfpmr(PMRN_PMLCA3);
56 break;
57 default:
58 panic("Bad ctr number\n");
59 }
60
61 return pmlca;
62}
63
64static inline void set_pmlca(int ctr, u32 pmlca)
65{
66 switch (ctr) {
67 case 0:
68 mtpmr(PMRN_PMLCA0, pmlca);
69 break;
70 case 1:
71 mtpmr(PMRN_PMLCA1, pmlca);
72 break;
73 case 2:
74 mtpmr(PMRN_PMLCA2, pmlca);
75 break;
76 case 3:
77 mtpmr(PMRN_PMLCA3, pmlca);
78 break;
79 default:
80 panic("Bad ctr number\n");
81 }
82}
83
84void init_pmc_stop(int ctr)
85{
86 u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
87 PMLCA_FCM1 | PMLCA_FCM0);
88 u32 pmlcb = 0;
89
90 switch (ctr) {
91 case 0:
92 mtpmr(PMRN_PMLCA0, pmlca);
93 mtpmr(PMRN_PMLCB0, pmlcb);
94 break;
95 case 1:
96 mtpmr(PMRN_PMLCA1, pmlca);
97 mtpmr(PMRN_PMLCB1, pmlcb);
98 break;
99 case 2:
100 mtpmr(PMRN_PMLCA2, pmlca);
101 mtpmr(PMRN_PMLCB2, pmlcb);
102 break;
103 case 3:
104 mtpmr(PMRN_PMLCA3, pmlca);
105 mtpmr(PMRN_PMLCB3, pmlcb);
106 break;
107 default:
108 panic("Bad ctr number!\n");
109 }
110}
111
112void set_pmc_event(int ctr, int event)
113{
114 u32 pmlca;
115
116 pmlca = get_pmlca(ctr);
117
118 pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
119 ((event << PMLCA_EVENT_SHIFT) &
120 PMLCA_EVENT_MASK);
121
122 set_pmlca(ctr, pmlca);
123}
124
125void set_pmc_user_kernel(int ctr, int user, int kernel)
126{
127 u32 pmlca;
128
129 pmlca = get_pmlca(ctr);
130
131 if(user)
132 pmlca &= ~PMLCA_FCU;
133 else
134 pmlca |= PMLCA_FCU;
135
136 if(kernel)
137 pmlca &= ~PMLCA_FCS;
138 else
139 pmlca |= PMLCA_FCS;
140
141 set_pmlca(ctr, pmlca);
142}
143
144void set_pmc_marked(int ctr, int mark0, int mark1)
145{
146 u32 pmlca = get_pmlca(ctr);
147
148 if(mark0)
149 pmlca &= ~PMLCA_FCM0;
150 else
151 pmlca |= PMLCA_FCM0;
152
153 if(mark1)
154 pmlca &= ~PMLCA_FCM1;
155 else
156 pmlca |= PMLCA_FCM1;
157
158 set_pmlca(ctr, pmlca);
159}
160
161void pmc_start_ctr(int ctr, int enable)
162{
163 u32 pmlca = get_pmlca(ctr);
164
165 pmlca &= ~PMLCA_FC;
166
167 if (enable)
168 pmlca |= PMLCA_CE;
169 else
170 pmlca &= ~PMLCA_CE;
171
172 set_pmlca(ctr, pmlca);
173}
174
175void pmc_start_ctrs(int enable)
176{
177 u32 pmgc0 = mfpmr(PMRN_PMGC0);
178
179 pmgc0 &= ~PMGC0_FAC;
180 pmgc0 |= PMGC0_FCECE;
181
182 if (enable)
183 pmgc0 |= PMGC0_PMIE;
184 else
185 pmgc0 &= ~PMGC0_PMIE;
186
187 mtpmr(PMRN_PMGC0, pmgc0);
188}
189
190void pmc_stop_ctrs(void)
191{
192 u32 pmgc0 = mfpmr(PMRN_PMGC0);
193
194 pmgc0 |= PMGC0_FAC;
195
196 pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);
197
198 mtpmr(PMRN_PMGC0, pmgc0);
199}
200
201void dump_pmcs(void)
202{
203 printk("pmgc0: %x\n", mfpmr(PMRN_PMGC0));
204 printk("pmc\t\tpmlca\t\tpmlcb\n");
205 printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC0),
206 mfpmr(PMRN_PMLCA0), mfpmr(PMRN_PMLCB0));
207 printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC1),
208 mfpmr(PMRN_PMLCA1), mfpmr(PMRN_PMLCB1));
209 printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC2),
210 mfpmr(PMRN_PMLCA2), mfpmr(PMRN_PMLCB2));
211 printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC3),
212 mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3));
213}
214
215EXPORT_SYMBOL(init_pmc_stop);
216EXPORT_SYMBOL(set_pmc_event);
217EXPORT_SYMBOL(set_pmc_user_kernel);
218EXPORT_SYMBOL(set_pmc_marked);
219EXPORT_SYMBOL(pmc_start_ctr);
220EXPORT_SYMBOL(pmc_start_ctrs);
221EXPORT_SYMBOL(pmc_stop_ctrs);
222EXPORT_SYMBOL(dump_pmcs);
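
A hypothetical caller of the API removed above might program counter 0 like this. Sketch only: it assumes the declarations from this file, and EVENT_NUMBER is illustrative since real event codes are core-specific.

    /* Count one event on PMC0, user mode only, no overflow interrupt. */
    #define EVENT_NUMBER 1

    static void sample_pmc_setup(void)
    {
        init_pmc_stop(0);               /* freeze ctr 0 in a known state */
        set_pmc_event(0, EVENT_NUMBER); /* select what to count */
        set_pmc_user_kernel(0, 1, 0);   /* count user, not kernel */
        set_pmc_marked(0, 1, 1);        /* count regardless of mark bits */
        pmc_start_ctr(0, 0);            /* unfreeze ctr 0, no per-ctr intr */
        pmc_start_ctrs(0);              /* globally unfreeze, PMIE off */
    }
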
diff --git a/arch/ppc/kernel/ppc_htab.c b/arch/ppc/kernel/ppc_htab.c
index 9b84bffdefce..75c645043746 100644
--- a/arch/ppc/kernel/ppc_htab.c
+++ b/arch/ppc/kernel/ppc_htab.c
@@ -104,7 +104,7 @@ static char *pmc2_lookup(unsigned long mmcr0)
104static int ppc_htab_show(struct seq_file *m, void *v) 104static int ppc_htab_show(struct seq_file *m, void *v)
105{ 105{
106 unsigned long mmcr0 = 0, pmc1 = 0, pmc2 = 0; 106 unsigned long mmcr0 = 0, pmc1 = 0, pmc2 = 0;
107#if defined(CONFIG_PPC_STD_MMU) && !defined(CONFIG_PPC64BRIDGE) 107#if defined(CONFIG_PPC_STD_MMU)
108 unsigned int kptes = 0, uptes = 0; 108 unsigned int kptes = 0, uptes = 0;
109 PTE *ptr; 109 PTE *ptr;
110#endif /* CONFIG_PPC_STD_MMU */ 110#endif /* CONFIG_PPC_STD_MMU */
@@ -133,7 +133,6 @@ static int ppc_htab_show(struct seq_file *m, void *v)
133 return 0; 133 return 0;
134 } 134 }
135 135
136#ifndef CONFIG_PPC64BRIDGE
137 for (ptr = Hash; ptr < Hash_end; ptr++) { 136 for (ptr = Hash; ptr < Hash_end; ptr++) {
138 unsigned int mctx, vsid; 137 unsigned int mctx, vsid;
139 138
@@ -147,7 +146,6 @@ static int ppc_htab_show(struct seq_file *m, void *v)
147 else 146 else
148 uptes++; 147 uptes++;
149 } 148 }
150#endif
151 149
152 seq_printf(m, 150 seq_printf(m,
153 "PTE Hash Table Information\n" 151 "PTE Hash Table Information\n"
@@ -155,20 +153,16 @@ static int ppc_htab_show(struct seq_file *m, void *v)
155 "Buckets\t\t: %lu\n" 153 "Buckets\t\t: %lu\n"
156 "Address\t\t: %08lx\n" 154 "Address\t\t: %08lx\n"
157 "Entries\t\t: %lu\n" 155 "Entries\t\t: %lu\n"
158#ifndef CONFIG_PPC64BRIDGE
159 "User ptes\t: %u\n" 156 "User ptes\t: %u\n"
160 "Kernel ptes\t: %u\n" 157 "Kernel ptes\t: %u\n"
161 "Percent full\t: %lu%%\n" 158 "Percent full\t: %lu%%\n"
162#endif
163 , (unsigned long)(Hash_size>>10), 159 , (unsigned long)(Hash_size>>10),
164 (Hash_size/(sizeof(PTE)*8)), 160 (Hash_size/(sizeof(PTE)*8)),
165 (unsigned long)Hash, 161 (unsigned long)Hash,
166 Hash_size/sizeof(PTE) 162 Hash_size/sizeof(PTE)
167#ifndef CONFIG_PPC64BRIDGE
168 , uptes, 163 , uptes,
169 kptes, 164 kptes,
170 ((kptes+uptes)*100) / (Hash_size/sizeof(PTE)) 165 ((kptes+uptes)*100) / (Hash_size/sizeof(PTE))
171#endif
172 ); 166 );
173 167
174 seq_printf(m, 168 seq_printf(m,
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index 82adb4601348..865ba74991a9 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -18,7 +18,6 @@
18#include <linux/bitops.h> 18#include <linux/bitops.h>
19 19
20#include <asm/page.h> 20#include <asm/page.h>
21#include <asm/semaphore.h>
22#include <asm/processor.h> 21#include <asm/processor.h>
23#include <asm/uaccess.h> 22#include <asm/uaccess.h>
24#include <asm/io.h> 23#include <asm/io.h>
@@ -30,7 +29,6 @@
30#include <linux/adb.h> 29#include <linux/adb.h>
31#include <linux/cuda.h> 30#include <linux/cuda.h>
32#include <linux/pmu.h> 31#include <linux/pmu.h>
33#include <asm/prom.h>
34#include <asm/system.h> 32#include <asm/system.h>
35#include <asm/pci-bridge.h> 33#include <asm/pci-bridge.h>
36#include <asm/irq.h> 34#include <asm/irq.h>
@@ -208,27 +206,6 @@ EXPORT_SYMBOL(adb_try_handler_change);
208EXPORT_SYMBOL(cuda_request); 206EXPORT_SYMBOL(cuda_request);
209EXPORT_SYMBOL(cuda_poll); 207EXPORT_SYMBOL(cuda_poll);
210#endif /* CONFIG_ADB_CUDA */ 208#endif /* CONFIG_ADB_CUDA */
211#ifdef CONFIG_PPC_OF
212EXPORT_SYMBOL(find_devices);
213EXPORT_SYMBOL(find_type_devices);
214EXPORT_SYMBOL(find_compatible_devices);
215EXPORT_SYMBOL(find_path_device);
216EXPORT_SYMBOL(device_is_compatible);
217EXPORT_SYMBOL(machine_is_compatible);
218EXPORT_SYMBOL(find_all_nodes);
219EXPORT_SYMBOL(get_property);
220EXPORT_SYMBOL(request_OF_resource);
221EXPORT_SYMBOL(release_OF_resource);
222EXPORT_SYMBOL(of_find_node_by_name);
223EXPORT_SYMBOL(of_find_node_by_type);
224EXPORT_SYMBOL(of_find_compatible_node);
225EXPORT_SYMBOL(of_find_node_by_path);
226EXPORT_SYMBOL(of_find_all_nodes);
227EXPORT_SYMBOL(of_get_parent);
228EXPORT_SYMBOL(of_get_next_child);
229EXPORT_SYMBOL(of_node_get);
230EXPORT_SYMBOL(of_node_put);
231#endif /* CONFIG_PPC_OF */
232#if defined(CONFIG_BOOTX_TEXT) 209#if defined(CONFIG_BOOTX_TEXT)
233EXPORT_SYMBOL(btext_update_display); 210EXPORT_SYMBOL(btext_update_display);
234#endif 211#endif
@@ -262,9 +239,6 @@ EXPORT_SYMBOL(console_drivers);
262EXPORT_SYMBOL(xmon); 239EXPORT_SYMBOL(xmon);
263EXPORT_SYMBOL(xmon_printf); 240EXPORT_SYMBOL(xmon_printf);
264#endif 241#endif
265EXPORT_SYMBOL(__up);
266EXPORT_SYMBOL(__down);
267EXPORT_SYMBOL(__down_interruptible);
268 242
269#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) 243#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
270extern void (*debugger)(struct pt_regs *regs); 244extern void (*debugger)(struct pt_regs *regs);
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 53e9deacee82..1f79e84ab464 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Common prep/chrp boot and setup code. 2 * Common prep boot and setup code.
3 */ 3 */
4 4
5#include <linux/config.h> 5#include <linux/config.h>
@@ -72,17 +72,12 @@ unsigned long ISA_DMA_THRESHOLD;
72unsigned int DMA_MODE_READ; 72unsigned int DMA_MODE_READ;
73unsigned int DMA_MODE_WRITE; 73unsigned int DMA_MODE_WRITE;
74 74
75#ifdef CONFIG_PPC_MULTIPLATFORM 75#ifdef CONFIG_PPC_PREP
76int _machine = 0;
77EXPORT_SYMBOL(_machine);
78
79extern void prep_init(unsigned long r3, unsigned long r4, 76extern void prep_init(unsigned long r3, unsigned long r4,
80 unsigned long r5, unsigned long r6, unsigned long r7); 77 unsigned long r5, unsigned long r6, unsigned long r7);
81extern void chrp_init(unsigned long r3, unsigned long r4,
82 unsigned long r5, unsigned long r6, unsigned long r7);
83 78
84dev_t boot_dev; 79dev_t boot_dev;
85#endif /* CONFIG_PPC_MULTIPLATFORM */ 80#endif /* CONFIG_PPC_PREP */
86 81
87int have_of; 82int have_of;
88EXPORT_SYMBOL(have_of); 83EXPORT_SYMBOL(have_of);
@@ -319,72 +314,12 @@ early_init(int r3, int r4, int r5)
319 identify_cpu(offset, 0); 314 identify_cpu(offset, 0);
320 do_cpu_ftr_fixups(offset); 315 do_cpu_ftr_fixups(offset);
321 316
322#if defined(CONFIG_PPC_OF)
323 reloc_got2(offset);
324
325 /*
326 * don't do anything on prep
327 * for now, don't use bootinfo because it breaks yaboot 0.5
328 * and assume that if we didn't find a magic number, we have OF
329 */
330 if (*(unsigned long *)(0) != 0xdeadc0de)
331 phys = prom_init(r3, r4, (prom_entry)r5);
332
333 reloc_got2(-offset);
334#endif
335
336 return phys; 317 return phys;
337} 318}
338 319
339#ifdef CONFIG_PPC_OF 320#ifdef CONFIG_PPC_PREP
340/*
341 * Assume here that all clock rates are the same in an
342 * SMP system. -- Cort
343 */
344int
345of_show_percpuinfo(struct seq_file *m, int i)
346{
347 struct device_node *cpu_node;
348 u32 *fp;
349 int s;
350
351 cpu_node = find_type_devices("cpu");
352 if (!cpu_node)
353 return 0;
354 for (s = 0; s < i && cpu_node->next; s++)
355 cpu_node = cpu_node->next;
356 fp = (u32 *)get_property(cpu_node, "clock-frequency", NULL);
357 if (fp)
358 seq_printf(m, "clock\t\t: %dMHz\n", *fp / 1000000);
359 return 0;
360}
361
362void __init
363intuit_machine_type(void)
364{
365 char *model;
366 struct device_node *root;
367
368 /* ask the OF info if we're a chrp or pmac */
369 root = find_path_device("/");
370 if (root != 0) {
371 /* assume pmac unless proven to be chrp -- Cort */
372 _machine = _MACH_Pmac;
373 model = get_property(root, "device_type", NULL);
374 if (model && !strncmp("chrp", model, 4))
375 _machine = _MACH_chrp;
376 else {
377 model = get_property(root, "model", NULL);
378 if (model && !strncmp(model, "IBM", 3))
379 _machine = _MACH_chrp;
380 }
381 }
382}
383#endif
384
385#ifdef CONFIG_PPC_MULTIPLATFORM
386/* 321/*
387 * The PPC_MULTIPLATFORM version of platform_init... 322 * The PPC_PREP version of platform_init...
388 */ 323 */
389void __init 324void __init
390platform_init(unsigned long r3, unsigned long r4, unsigned long r5, 325platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
@@ -399,161 +334,9 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
399 334
400 parse_bootinfo(find_bootinfo()); 335 parse_bootinfo(find_bootinfo());
401 336
402 /* if we didn't get any bootinfo telling us what we are... */ 337 prep_init(r3, r4, r5, r6, r7);
403 if (_machine == 0) {
404 /* prep boot loader tells us if we're prep or not */
405 if ( *(unsigned long *)(KERNELBASE) == (0xdeadc0de) )
406 _machine = _MACH_prep;
407 }
408
409#ifdef CONFIG_PPC_PREP
410 /* not much more to do here, if prep */
411 if (_machine == _MACH_prep) {
412 prep_init(r3, r4, r5, r6, r7);
413 return;
414 }
415#endif
416
417#ifdef CONFIG_PPC_OF
418 have_of = 1;
419
420 /* prom_init has already been called from __start */
421 if (boot_infos)
422 relocate_nodes();
423
424 /* If we aren't PReP, we can find out if we're Pmac
425 * or CHRP with this. */
426 if (_machine == 0)
427 intuit_machine_type();
428
429 /* finish_device_tree may need _machine defined. */
430 finish_device_tree();
431
432 /*
433 * If we were booted via quik, r3 points to the physical
434 * address of the command-line parameters.
435 * If we were booted from an xcoff image (i.e. netbooted or
436 * booted from floppy), we get the command line from the
437 * bootargs property of the /chosen node.
438 * If an initial ramdisk is present, r3 and r4
439 * are used for initrd_start and initrd_size,
440 * otherwise they contain 0xdeadbeef.
441 */
442 if (r3 >= 0x4000 && r3 < 0x800000 && r4 == 0) {
443 strlcpy(cmd_line, (char *)r3 + KERNELBASE,
444 sizeof(cmd_line));
445 } else if (boot_infos != 0) {
446 /* booted by BootX - check for ramdisk */
447 if (boot_infos->kernelParamsOffset != 0)
448 strlcpy(cmd_line, (char *) boot_infos
449 + boot_infos->kernelParamsOffset,
450 sizeof(cmd_line));
451#ifdef CONFIG_BLK_DEV_INITRD
452 if (boot_infos->ramDisk) {
453 initrd_start = (unsigned long) boot_infos
454 + boot_infos->ramDisk;
455 initrd_end = initrd_start + boot_infos->ramDiskSize;
456 initrd_below_start_ok = 1;
457 }
458#endif
459 } else {
460 struct device_node *chosen;
461 char *p;
462
463#ifdef CONFIG_BLK_DEV_INITRD
464 if (r3 && r4 && r4 != 0xdeadbeef) {
465 if (r3 < KERNELBASE)
466 r3 += KERNELBASE;
467 initrd_start = r3;
468 initrd_end = r3 + r4;
469 ROOT_DEV = Root_RAM0;
470 initrd_below_start_ok = 1;
471 }
472#endif
473 chosen = find_devices("chosen");
474 if (chosen != NULL) {
475 p = get_property(chosen, "bootargs", NULL);
476 if (p && *p) {
477 strlcpy(cmd_line, p, sizeof(cmd_line));
478 }
479 }
480 }
481#ifdef CONFIG_ADB
482 if (strstr(cmd_line, "adb_sync")) {
483 extern int __adb_probe_sync;
484 __adb_probe_sync = 1;
485 }
486#endif /* CONFIG_ADB */
487
488 switch (_machine) {
489#ifdef CONFIG_PPC_CHRP
490 case _MACH_chrp:
491 chrp_init(r3, r4, r5, r6, r7);
492 break;
493#endif
494 }
495#endif /* CONFIG_PPC_OF */
496} 338}
497#endif /* CONFIG_PPC_MULTIPLATFORM */ 339#endif /* CONFIG_PPC_PREP */
498
499#ifdef CONFIG_PPC_OF
500#ifdef CONFIG_SERIAL_CORE_CONSOLE
501extern char *of_stdout_device;
502
503static int __init set_preferred_console(void)
504{
505 struct device_node *prom_stdout;
506 char *name;
507 int offset = 0;
508
509 if (of_stdout_device == NULL)
510 return -ENODEV;
511
512 /* The user has requested a console so this is already set up. */
513 if (strstr(saved_command_line, "console="))
514 return -EBUSY;
515
516 prom_stdout = find_path_device(of_stdout_device);
517 if (!prom_stdout)
518 return -ENODEV;
519
520 name = (char *)get_property(prom_stdout, "name", NULL);
521 if (!name)
522 return -ENODEV;
523
524 if (strcmp(name, "serial") == 0) {
525 int i;
526 u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i);
527 if (i > 8) {
528 switch (reg[1]) {
529 case 0x3f8:
530 offset = 0;
531 break;
532 case 0x2f8:
533 offset = 1;
534 break;
535 case 0x898:
536 offset = 2;
537 break;
538 case 0x890:
539 offset = 3;
540 break;
541 default:
542				/* We don't recognise the serial port */
543 return -ENODEV;
544 }
545 }
546 } else if (strcmp(name, "ch-a") == 0)
547 offset = 0;
548 else if (strcmp(name, "ch-b") == 0)
549 offset = 1;
550 else
551 return -ENODEV;
552 return add_preferred_console("ttyS", offset, NULL);
553}
554console_initcall(set_preferred_console);
555#endif /* CONFIG_SERIAL_CORE_CONSOLE */
556#endif /* CONFIG_PPC_OF */
557 340
558struct bi_record *find_bootinfo(void) 341struct bi_record *find_bootinfo(void)
559{ 342{
@@ -589,23 +372,6 @@ void parse_bootinfo(struct bi_record *rec)
589 initrd_end = data[0] + data[1] + KERNELBASE; 372 initrd_end = data[0] + data[1] + KERNELBASE;
590 break; 373 break;
591#endif /* CONFIG_BLK_DEV_INITRD */ 374#endif /* CONFIG_BLK_DEV_INITRD */
592#ifdef CONFIG_PPC_MULTIPLATFORM
593 case BI_MACHTYPE:
594 /* Machine types changed with the merge. Since the
595		 * bootinfo records are now deprecated, we can just hard-code
596		 * the appropriate conversion here for when we are
597		 * called by yaboot, which passes us a machine type
598 * this way.
599 */
600 switch(data[0]) {
601 case 1: _machine = _MACH_prep; break;
602 case 2: _machine = _MACH_Pmac; break;
603 case 4: _machine = _MACH_chrp; break;
604 default:
605 _machine = data[0];
606 }
607 break;
608#endif
609 case BI_MEMSIZE: 375 case BI_MEMSIZE:
610 boot_mem_size = data[0]; 376 boot_mem_size = data[0];
611 break; 377 break;
@@ -631,9 +397,6 @@ machine_init(unsigned long r3, unsigned long r4, unsigned long r5,
631#ifdef CONFIG_6xx 397#ifdef CONFIG_6xx
632 ppc_md.power_save = ppc6xx_idle; 398 ppc_md.power_save = ppc6xx_idle;
633#endif 399#endif
634#ifdef CONFIG_POWER4
635 ppc_md.power_save = power4_idle;
636#endif
637 400
638 platform_init(r3, r4, r5, r6, r7); 401 platform_init(r3, r4, r5, r6, r7);
639 402
@@ -711,7 +474,7 @@ int __init ppc_init(void)
711 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); 474 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
712 475
713 /* register CPU devices */ 476 /* register CPU devices */
714 for_each_cpu(i) 477 for_each_possible_cpu(i)
715 register_cpu(&cpu_devices[i], i, NULL); 478 register_cpu(&cpu_devices[i], i, NULL);
716 479
717 /* call platform init */ 480 /* call platform init */
@@ -799,7 +562,4 @@ void __init setup_arch(char **cmdline_p)
799 if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab); 562 if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
800 563
801 paging_init(); 564 paging_init();
802
803 /* this is for modules since _machine can be a define -- Cort */
804 ppc_md.ppc_machine = _machine;
805} 565}
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
index e55cdda6149a..f77795a64dae 100644
--- a/arch/ppc/kernel/smp.c
+++ b/arch/ppc/kernel/smp.c
@@ -311,7 +311,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
311 /* Backup CPU 0 state */ 311 /* Backup CPU 0 state */
312 __save_cpu_setup(); 312 __save_cpu_setup();
313 313
314 for_each_cpu(cpu) { 314 for_each_possible_cpu(cpu) {
315 if (cpu == smp_processor_id()) 315 if (cpu == smp_processor_id())
316 continue; 316 continue;
317 /* create a process for the processor */ 317 /* create a process for the processor */
diff --git a/arch/ppc/kernel/swsusp.S b/arch/ppc/kernel/swsusp.S
deleted file mode 100644
index 69773cc1a85f..000000000000
--- a/arch/ppc/kernel/swsusp.S
+++ /dev/null
@@ -1,349 +0,0 @@
1#include <linux/config.h>
2#include <linux/threads.h>
3#include <asm/processor.h>
4#include <asm/page.h>
5#include <asm/cputable.h>
6#include <asm/thread_info.h>
7#include <asm/ppc_asm.h>
8#include <asm/asm-offsets.h>
9
10
11/*
12 * Structure for storing CPU registers in the save area.
13 */
14#define SL_SP 0
15#define SL_PC 4
16#define SL_MSR 8
17#define SL_SDR1 0xc
18#define SL_SPRG0 0x10 /* 4 sprg's */
19#define SL_DBAT0 0x20
20#define SL_IBAT0 0x28
21#define SL_DBAT1 0x30
22#define SL_IBAT1 0x38
23#define SL_DBAT2 0x40
24#define SL_IBAT2 0x48
25#define SL_DBAT3 0x50
26#define SL_IBAT3 0x58
27#define SL_TB 0x60
28#define SL_R2 0x68
29#define SL_CR 0x6c
30#define SL_LR 0x70
31#define SL_R12 0x74 /* r12 to r31 */
32#define SL_SIZE (SL_R12 + 80)
33
34 .section .data
35 .align 5
36
37_GLOBAL(swsusp_save_area)
38 .space SL_SIZE
39
40
41 .section .text
42 .align 5
43
44_GLOBAL(swsusp_arch_suspend)
45
46 lis r11,swsusp_save_area@h
47 ori r11,r11,swsusp_save_area@l
48
49 mflr r0
50 stw r0,SL_LR(r11)
51 mfcr r0
52 stw r0,SL_CR(r11)
53 stw r1,SL_SP(r11)
54 stw r2,SL_R2(r11)
55 stmw r12,SL_R12(r11)
56
57 /* Save MSR & SDR1 */
58 mfmsr r4
59 stw r4,SL_MSR(r11)
60 mfsdr1 r4
61 stw r4,SL_SDR1(r11)
62
63 /* Get a stable timebase and save it */
641: mftbu r4
65 stw r4,SL_TB(r11)
66 mftb r5
67 stw r5,SL_TB+4(r11)
68 mftbu r3
69 cmpw r3,r4
70 bne 1b
71
72 /* Save SPRGs */
73 mfsprg r4,0
74 stw r4,SL_SPRG0(r11)
75 mfsprg r4,1
76 stw r4,SL_SPRG0+4(r11)
77 mfsprg r4,2
78 stw r4,SL_SPRG0+8(r11)
79 mfsprg r4,3
80 stw r4,SL_SPRG0+12(r11)
81
82 /* Save BATs */
83 mfdbatu r4,0
84 stw r4,SL_DBAT0(r11)
85 mfdbatl r4,0
86 stw r4,SL_DBAT0+4(r11)
87 mfdbatu r4,1
88 stw r4,SL_DBAT1(r11)
89 mfdbatl r4,1
90 stw r4,SL_DBAT1+4(r11)
91 mfdbatu r4,2
92 stw r4,SL_DBAT2(r11)
93 mfdbatl r4,2
94 stw r4,SL_DBAT2+4(r11)
95 mfdbatu r4,3
96 stw r4,SL_DBAT3(r11)
97 mfdbatl r4,3
98 stw r4,SL_DBAT3+4(r11)
99 mfibatu r4,0
100 stw r4,SL_IBAT0(r11)
101 mfibatl r4,0
102 stw r4,SL_IBAT0+4(r11)
103 mfibatu r4,1
104 stw r4,SL_IBAT1(r11)
105 mfibatl r4,1
106 stw r4,SL_IBAT1+4(r11)
107 mfibatu r4,2
108 stw r4,SL_IBAT2(r11)
109 mfibatl r4,2
110 stw r4,SL_IBAT2+4(r11)
111 mfibatu r4,3
112 stw r4,SL_IBAT3(r11)
113 mfibatl r4,3
114 stw r4,SL_IBAT3+4(r11)
115
116#if 0
117	/* Back up various CPU configuration state */
118 bl __save_cpu_setup
119#endif
120	/* Call the low level suspend code (we should probably have made
121	 * a stackframe...)
122 */
123 bl swsusp_save
124
125 /* Restore LR from the save area */
126 lis r11,swsusp_save_area@h
127 ori r11,r11,swsusp_save_area@l
128 lwz r0,SL_LR(r11)
129 mtlr r0
130
131 blr
132
133
134/* Resume code */
135_GLOBAL(swsusp_arch_resume)
136
137	/* Stop pending altivec streams and memory accesses */
138BEGIN_FTR_SECTION
139 DSSALL
140END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
141 sync
142
143	/* Disable MSR:DR to make sure we don't take a TLB or
144	 * hash miss during the copy, as our hash table will
145	 * be unusable for a while. For .text, we assume we are
146	 * covered by a BAT. This works only for non-G5 at this
147	 * point. G5 will need a better approach, possibly using
148	 * a small temporary hash table filled with large mappings;
149	 * disabling the MMU completely isn't a good option for
150	 * performance reasons.
151	 * (Note that 750s may have the same performance issue as
152	 * the G5 in this case; we should investigate using moving
153	 * BATs for these CPUs.)
154	 */
155 mfmsr r0
156 sync
157 rlwinm r0,r0,0,28,26 /* clear MSR_DR */
158 mtmsr r0
159 sync
160 isync
161
162	/* Load the pointer to the list of pages to copy */
163 lis r11,(pagedir_nosave - KERNELBASE)@h
164 ori r11,r11,pagedir_nosave@l
165 lwz r10,0(r11)
166
167 /* Copy the pages. This is a very basic implementation, to
168	 * be replaced by something more cache-efficient */
1691:
170 tophys(r3,r10)
171 li r0,256
172 mtctr r0
173 lwz r11,pbe_address(r3) /* source */
174 tophys(r5,r11)
175 lwz r10,pbe_orig_address(r3) /* destination */
176 tophys(r6,r10)
1772:
178 lwz r8,0(r5)
179 lwz r9,4(r5)
180 lwz r10,8(r5)
181 lwz r11,12(r5)
182 addi r5,r5,16
183 stw r8,0(r6)
184 stw r9,4(r6)
185 stw r10,8(r6)
186 stw r11,12(r6)
187 addi r6,r6,16
188 bdnz 2b
189 lwz r10,pbe_next(r3)
190 cmpwi 0,r10,0
191 bne 1b
192
193 /* Do a very simple cache flush/inval of the L1 to ensure
194 * coherency of the icache
195 */
196 lis r3,0x0002
197 mtctr r3
198 li r3, 0
1991:
200 lwz r0,0(r3)
201 addi r3,r3,0x0020
202 bdnz 1b
203 isync
204 sync
205
206 /* Now flush those cache lines */
207 lis r3,0x0002
208 mtctr r3
209 li r3, 0
2101:
211 dcbf 0,r3
212 addi r3,r3,0x0020
213 bdnz 1b
214 sync
215
216 /* Ok, we are now running with the kernel data of the old
217 * kernel fully restored. We can get to the save area
218 * easily now. As for the rest of the code, it assumes the
219	 * loader kernel and the booted one are exactly identical.
220 */
221 lis r11,swsusp_save_area@h
222 ori r11,r11,swsusp_save_area@l
223 tophys(r11,r11)
224
225#if 0
226	/* Restore various CPU configuration state */
227 bl __restore_cpu_setup
228#endif
229 /* Restore the BATs, and SDR1. Then we can turn on the MMU.
230 * This is a bit hairy as we are running out of those BATs,
231 * but first, our code is probably in the icache, and we are
232 * writing the same value to the BAT, so that should be fine,
233 * though a better solution will have to be found long-term
234 */
235 lwz r4,SL_SDR1(r11)
236 mtsdr1 r4
237 lwz r4,SL_SPRG0(r11)
238 mtsprg 0,r4
239 lwz r4,SL_SPRG0+4(r11)
240 mtsprg 1,r4
241 lwz r4,SL_SPRG0+8(r11)
242 mtsprg 2,r4
243 lwz r4,SL_SPRG0+12(r11)
244 mtsprg 3,r4
245
246#if 0
247 lwz r4,SL_DBAT0(r11)
248 mtdbatu 0,r4
249 lwz r4,SL_DBAT0+4(r11)
250 mtdbatl 0,r4
251 lwz r4,SL_DBAT1(r11)
252 mtdbatu 1,r4
253 lwz r4,SL_DBAT1+4(r11)
254 mtdbatl 1,r4
255 lwz r4,SL_DBAT2(r11)
256 mtdbatu 2,r4
257 lwz r4,SL_DBAT2+4(r11)
258 mtdbatl 2,r4
259 lwz r4,SL_DBAT3(r11)
260 mtdbatu 3,r4
261 lwz r4,SL_DBAT3+4(r11)
262 mtdbatl 3,r4
263 lwz r4,SL_IBAT0(r11)
264 mtibatu 0,r4
265 lwz r4,SL_IBAT0+4(r11)
266 mtibatl 0,r4
267 lwz r4,SL_IBAT1(r11)
268 mtibatu 1,r4
269 lwz r4,SL_IBAT1+4(r11)
270 mtibatl 1,r4
271 lwz r4,SL_IBAT2(r11)
272 mtibatu 2,r4
273 lwz r4,SL_IBAT2+4(r11)
274 mtibatl 2,r4
275 lwz r4,SL_IBAT3(r11)
276 mtibatu 3,r4
277 lwz r4,SL_IBAT3+4(r11)
278 mtibatl 3,r4
279#endif
280
281BEGIN_FTR_SECTION
282 li r4,0
283 mtspr SPRN_DBAT4U,r4
284 mtspr SPRN_DBAT4L,r4
285 mtspr SPRN_DBAT5U,r4
286 mtspr SPRN_DBAT5L,r4
287 mtspr SPRN_DBAT6U,r4
288 mtspr SPRN_DBAT6L,r4
289 mtspr SPRN_DBAT7U,r4
290 mtspr SPRN_DBAT7L,r4
291 mtspr SPRN_IBAT4U,r4
292 mtspr SPRN_IBAT4L,r4
293 mtspr SPRN_IBAT5U,r4
294 mtspr SPRN_IBAT5L,r4
295 mtspr SPRN_IBAT6U,r4
296 mtspr SPRN_IBAT6L,r4
297 mtspr SPRN_IBAT7U,r4
298 mtspr SPRN_IBAT7L,r4
299END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
300
301 /* Flush all TLBs */
302 lis r4,0x1000
3031: addic. r4,r4,-0x1000
304 tlbie r4
305 blt 1b
306 sync
307
308 /* restore the MSR and turn on the MMU */
309 lwz r3,SL_MSR(r11)
310 bl turn_on_mmu
311 tovirt(r11,r11)
312
313 /* Restore TB */
314 li r3,0
315 mttbl r3
316 lwz r3,SL_TB(r11)
317 lwz r4,SL_TB+4(r11)
318 mttbu r3
319 mttbl r4
320
321 /* Kick decrementer */
322 li r0,1
323 mtdec r0
324
325 /* Restore the callee-saved registers and return */
326 lwz r0,SL_CR(r11)
327 mtcr r0
328 lwz r2,SL_R2(r11)
329 lmw r12,SL_R12(r11)
330 lwz r1,SL_SP(r11)
331 lwz r0,SL_LR(r11)
332 mtlr r0
333
334 // XXX Note: we don't really need to call swsusp_resume
335
336 li r3,0
337 blr
338
339/* FIXME: This construct is actually not useful since we don't shut
340 * down the instruction MMU; we could just flip MSR:DR back on.
341 */
342turn_on_mmu:
343 mflr r4
344 mtsrr0 r4
345 mtsrr1 r3
346 sync
347 isync
348 rfi
349
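
The loop at the top of swsusp_arch_suspend reads the 64-bit timebase as two 32-bit halves and re-reads TBU afterwards to catch a carry rippling from TBL into TBU between the reads. The same idea in C, with hypothetical mftbu_read/mftbl_read accessors standing in for the mftbu/mftb instructions:

    #include <stdint.h>

    /* Hypothetical 32-bit timebase accessors. */
    extern uint32_t mftbu_read(void);
    extern uint32_t mftbl_read(void);

    /* Read a consistent 64-bit timebase: if TBU changed while we
     * were reading TBL, a carry occurred mid-read, so retry. */
    static uint64_t read_timebase(void)
    {
        uint32_t hi, lo;

        do {
            hi = mftbu_read();
            lo = mftbl_read();
        } while (mftbu_read() != hi);

        return ((uint64_t)hi << 32) | lo;
    }
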
diff --git a/arch/ppc/kernel/temp.c b/arch/ppc/kernel/temp.c
deleted file mode 100644
index 26bd8ea35a4e..000000000000
--- a/arch/ppc/kernel/temp.c
+++ /dev/null
@@ -1,271 +0,0 @@
1/*
2 * temp.c	Thermal management for CPUs with Thermal Assist Units
3 *
4 * Written by Troy Benjegerdes <hozer@drgw.net>
5 *
6 * TODO:
7 * dynamic power management to limit peak CPU temp (using ICTC)
8 * calibration???
9 *
10 * Silly, crazy ideas: use cpu load (from scheduler) and ICTC to extend battery
11 * life in portables, and add a 'performance/watt' metric somewhere in /proc
12 */
13
14#include <linux/config.h>
15#include <linux/errno.h>
16#include <linux/jiffies.h>
17#include <linux/kernel.h>
18#include <linux/param.h>
19#include <linux/string.h>
20#include <linux/mm.h>
21#include <linux/interrupt.h>
22#include <linux/init.h>
23
24#include <asm/io.h>
25#include <asm/reg.h>
26#include <asm/nvram.h>
27#include <asm/cache.h>
28#include <asm/8xx_immap.h>
29#include <asm/machdep.h>
30
31static struct tau_temp
32{
33 int interrupts;
34 unsigned char low;
35 unsigned char high;
36 unsigned char grew;
37} tau[NR_CPUS];
38
39struct timer_list tau_timer;
40
41#undef DEBUG
42
43/* TODO: put these in a /proc interface, with some sanity checks, and maybe
44 * dynamic adjustment to minimize # of interrupts */
45/* configurable values for step size and how much to expand the window when
46 * we get an interrupt. These are based on the limit that was out of range */
47#define step_size 2 /* step size when temp goes out of range */
48#define window_expand 1 /* expand the window by this much */
49/* configurable values for shrinking the window */
50#define shrink_timer 2*HZ /* period between shrinking the window */
51#define min_window 2 /* minimum window size, degrees C */
52
53void set_thresholds(unsigned long cpu)
54{
55#ifdef CONFIG_TAU_INT
56 /*
57 * setup THRM1,
58 * threshold, valid bit, enable interrupts, interrupt when below threshold
59 */
60 mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID);
61
62 /* setup THRM2,
63	 * threshold, valid bit, enable interrupts, interrupt when above threshold
64 */
65 mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE);
66#else
67 /* same thing but don't enable interrupts */
68 mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID);
69 mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V);
70#endif
71}
72
73void TAUupdate(int cpu)
74{
75 unsigned thrm;
76
77#ifdef DEBUG
78 printk("TAUupdate ");
79#endif
80
81 /* if both thresholds are crossed, the step_sizes cancel out
82 * and the window winds up getting expanded twice. */
83 if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */
84 if(thrm & THRM1_TIN){ /* crossed low threshold */
85 if (tau[cpu].low >= step_size){
86 tau[cpu].low -= step_size;
87 tau[cpu].high -= (step_size - window_expand);
88 }
89 tau[cpu].grew = 1;
90#ifdef DEBUG
91 printk("low threshold crossed ");
92#endif
93 }
94 }
95 if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */
96 if(thrm & THRM1_TIN){ /* crossed high threshold */
97 if (tau[cpu].high <= 127-step_size){
98 tau[cpu].low += (step_size - window_expand);
99 tau[cpu].high += step_size;
100 }
101 tau[cpu].grew = 1;
102#ifdef DEBUG
103 printk("high threshold crossed ");
104#endif
105 }
106 }
107
108#ifdef DEBUG
109 printk("grew = %d\n", tau[cpu].grew);
110#endif
111
112#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */
113 set_thresholds(cpu);
114#endif
115
116}
117
118#ifdef CONFIG_TAU_INT
119/*
120 * TAU interrupts - called when we have a thermal assist unit interrupt
121 * with interrupts disabled
122 */
123
124void TAUException(struct pt_regs * regs)
125{
126 int cpu = smp_processor_id();
127
128 irq_enter();
129 tau[cpu].interrupts++;
130
131 TAUupdate(cpu);
132
133 irq_exit();
134}
135#endif /* CONFIG_TAU_INT */
136
137static void tau_timeout(void * info)
138{
139 int cpu;
140 unsigned long flags;
141 int size;
142 int shrink;
143
144 /* disabling interrupts *should* be okay */
145 local_irq_save(flags);
146 cpu = smp_processor_id();
147
148#ifndef CONFIG_TAU_INT
149 TAUupdate(cpu);
150#endif
151
152 size = tau[cpu].high - tau[cpu].low;
153 if (size > min_window && ! tau[cpu].grew) {
154		/* do an exponential shrink of half the amount currently over min size */
155 shrink = (2 + size - min_window) / 4;
156 if (shrink) {
157 tau[cpu].low += shrink;
158 tau[cpu].high -= shrink;
159 } else { /* size must have been min_window + 1 */
160 tau[cpu].low += 1;
161#if 1 /* debug */
162 if ((tau[cpu].high - tau[cpu].low) != min_window){
163 printk(KERN_ERR "temp.c: line %d, logic error\n", __LINE__);
164 }
165#endif
166 }
167 }
168
169 tau[cpu].grew = 0;
170
171 set_thresholds(cpu);
172
173	/*
174	 * Do the enable every time, since otherwise a bunch of (relatively)
175	 * complex sleep code would need to be added. One mtspr every time
176	 * tau_timeout is called is probably not a big deal.
177	 *
178	 * Enable the thermal sensor and set up the sample interval timer;
179	 * we need 20 us to do the compare. Until a nice 'cpu_speed' function
180	 * call is implemented, just assume a 500 MHz clock. It doesn't really
181	 * matter if we take too long for a compare since it's all interrupt
182	 * driven anyway.
183	 *
184	 * Use an extra long time (60 us @ 500 MHz).
185	 */
186 mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);
187
188 local_irq_restore(flags);
189}
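
The shrink step in tau_timeout narrows the window by roughly half of whatever it exceeds min_window (a quarter from each side), falling back to a one-degree nudge once integer division rounds the step to zero. A standalone sketch of just that arithmetic, showing the window converges to the minimum:

    #include <assert.h>

    #define MIN_WINDOW 2    /* mirrors min_window above */

    /* Shrink one (low, high) window the way tau_timeout does. */
    static void shrink_window(unsigned char *low, unsigned char *high)
    {
        int size = *high - *low;
        int shrink = (2 + size - MIN_WINDOW) / 4;

        if (size <= MIN_WINDOW)
            return;                 /* already at the minimum */
        if (shrink) {
            *low += shrink;
            *high -= shrink;
        } else {                    /* size was MIN_WINDOW + 1 */
            *low += 1;
        }
    }

    int main(void)
    {
        unsigned char lo = 5, hi = 120;
        int i;

        for (i = 0; i < 32; i++)
            shrink_window(&lo, &hi);
        assert(hi - lo == MIN_WINDOW);
        return 0;
    }
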
190
191static void tau_timeout_smp(unsigned long unused)
192{
193
194 /* schedule ourselves to be run again */
195 mod_timer(&tau_timer, jiffies + shrink_timer) ;
196 on_each_cpu(tau_timeout, NULL, 1, 0);
197}
198
199/*
200 * Set up the TAU
201 *
202 * Set things up to use THRM1 as a temperature lower bound, and THRM2 as an upper bound.
203 * Start off at zero.
204 */
205
206int tau_initialized = 0;
207
208void __init TAU_init_smp(void * info)
209{
210 unsigned long cpu = smp_processor_id();
211
212 /* set these to a reasonable value and let the timer shrink the
213 * window */
214 tau[cpu].low = 5;
215 tau[cpu].high = 120;
216
217 set_thresholds(cpu);
218}
219
220int __init TAU_init(void)
221{
222 /* We assume in SMP that if one CPU has TAU support, they
223 * all have it --BenH
224 */
225 if (!cpu_has_feature(CPU_FTR_TAU)) {
226 printk("Thermal assist unit not available\n");
227 tau_initialized = 0;
228 return 1;
229 }
230
231
232 /* first, set up the window shrinking timer */
233 init_timer(&tau_timer);
234 tau_timer.function = tau_timeout_smp;
235 tau_timer.expires = jiffies + shrink_timer;
236 add_timer(&tau_timer);
237
238 on_each_cpu(TAU_init_smp, NULL, 1, 0);
239
240 printk("Thermal assist unit ");
241#ifdef CONFIG_TAU_INT
242 printk("using interrupts, ");
243#else
244 printk("using timers, ");
245#endif
246 printk("shrink_timer: %d jiffies\n", shrink_timer);
247 tau_initialized = 1;
248
249 return 0;
250}
251
252__initcall(TAU_init);
253
254/*
255 * return current temp
256 */
257
258u32 cpu_temp_both(unsigned long cpu)
259{
260 return ((tau[cpu].high << 16) | tau[cpu].low);
261}
262
263int cpu_temp(unsigned long cpu)
264{
265 return ((tau[cpu].high + tau[cpu].low) / 2);
266}
267
268int tau_interrupts(unsigned long cpu)
269{
270 return (tau[cpu].interrupts);
271}
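
cpu_temp_both above packs the per-CPU window bounds into a single u32; a caller would unpack it like this (sketch, helper name invented):

    #include <assert.h>
    #include <stdint.h>

    /* Unpack the value returned by cpu_temp_both(): high bound in
     * bits 16-23, low bound in bits 0-7. */
    static void unpack_temp_both(uint32_t both, int *low, int *high)
    {
        *high = (both >> 16) & 0xff;
        *low = both & 0xff;
    }

    int main(void)
    {
        int lo, hi;

        unpack_temp_both((120u << 16) | 5, &lo, &hi);
        assert(lo == 5 && hi == 120);
        return 0;
    }
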