Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile                                    |  11
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c                               |   2
-rw-r--r--  arch/powerpc/kernel/cpu_setup_6xx.S                             | 474
-rw-r--r--  arch/powerpc/kernel/entry_32.S                                  |   8
-rw-r--r--  arch/powerpc/kernel/entry_64.S                                  |   6
-rw-r--r--  arch/powerpc/kernel/firmware.c                                  |   4
-rw-r--r--  arch/powerpc/kernel/head_64.S                                   |  32
-rw-r--r--  arch/powerpc/kernel/idle.c (renamed from arch/powerpc/kernel/idle_64.c) | 79
-rw-r--r--  arch/powerpc/kernel/idle_6xx.S                                  |  18
-rw-r--r--  arch/powerpc/kernel/idle_power4.S                               |  38
-rw-r--r--  arch/powerpc/kernel/irq.c                                       |   2
-rw-r--r--  arch/powerpc/kernel/l2cr_6xx.S                                  | 471
-rw-r--r--  arch/powerpc/kernel/legacy_serial.c                             |  42
-rw-r--r--  arch/powerpc/kernel/lparcfg.c                                   |   4
-rw-r--r--  arch/powerpc/kernel/module_32.c                                 | 320
-rw-r--r--  arch/powerpc/kernel/nvram_64.c                                  |   7
-rw-r--r--  arch/powerpc/kernel/paca.c                                      |  21
-rw-r--r--  arch/powerpc/kernel/pci_32.c                                    |   4
-rw-r--r--  arch/powerpc/kernel/pci_64.c                                    |   1
-rw-r--r--  arch/powerpc/kernel/perfmon_fsl_booke.c                         | 222
-rw-r--r--  arch/powerpc/kernel/proc_ppc64.c                                |   3
-rw-r--r--  arch/powerpc/kernel/process.c                                   |  11
-rw-r--r--  arch/powerpc/kernel/prom.c                                      | 154
-rw-r--r--  arch/powerpc/kernel/prom_init.c                                 |  68
-rw-r--r--  arch/powerpc/kernel/rtas-proc.c                                 |   2
-rw-r--r--  arch/powerpc/kernel/rtas.c                                      |   8
-rw-r--r--  arch/powerpc/kernel/setup-common.c                              |  70
-rw-r--r--  arch/powerpc/kernel/setup_32.c                                  |  75
-rw-r--r--  arch/powerpc/kernel/setup_64.c                                  |  78
-rw-r--r--  arch/powerpc/kernel/signal_32.c                                 |   1
-rw-r--r--  arch/powerpc/kernel/signal_64.c                                 |   3
-rw-r--r--  arch/powerpc/kernel/smp.c                                       |   2
-rw-r--r--  arch/powerpc/kernel/swsusp_32.S                                 | 349
-rw-r--r--  arch/powerpc/kernel/syscalls.c                                  |   1
-rw-r--r--  arch/powerpc/kernel/sysfs.c                                     |  12
-rw-r--r--  arch/powerpc/kernel/tau_6xx.c                                   | 271
-rw-r--r--  arch/powerpc/kernel/time.c                                      |   4
-rw-r--r--  arch/powerpc/kernel/traps.c                                     |  35
-rw-r--r--  arch/powerpc/kernel/vdso.c                                      |   9
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S                               | 381
40 files changed, 2663 insertions(+), 640 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 80e9fe2632b8..0cc0995b81b0 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -12,12 +12,12 @@ endif
 
 obj-y                   := semaphore.o cputable.o ptrace.o syscalls.o \
                            irq.o align.o signal_32.o pmc.o vdso.o \
-                           init_task.o process.o systbl.o
+                           init_task.o process.o systbl.o idle.o
 obj-y                   += vdso32/
 obj-$(CONFIG_PPC64)     += setup_64.o binfmt_elf32.o sys_ppc32.o \
                            signal_64.o ptrace32.o \
                            paca.o cpu_setup_power4.o \
-                           firmware.o sysfs.o idle_64.o
+                           firmware.o sysfs.o
 obj-$(CONFIG_PPC64)     += vdso64/
 obj-$(CONFIG_ALTIVEC)   += vecemu.o vector.o
 obj-$(CONFIG_POWER4)    += idle_power4.o
@@ -34,6 +34,11 @@ obj-$(CONFIG_IBMEBUS) += ibmebus.o
 obj-$(CONFIG_GENERIC_TBSYNC)      += smp-tbsync.o
 obj64-$(CONFIG_PPC_MULTIPLATFORM) += nvram_64.o
 obj-$(CONFIG_CRASH_DUMP)          += crash_dump.o
+obj-$(CONFIG_6xx)                 += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
+obj-$(CONFIG_TAU)                 += tau_6xx.o
+obj32-$(CONFIG_SOFTWARE_SUSPEND)  += swsusp_32.o
+obj32-$(CONFIG_MODULES)           += module_32.o
+obj-$(CONFIG_E500)                += perfmon_fsl_booke.o
 
 ifeq ($(CONFIG_PPC_MERGE),y)
 
@@ -51,7 +56,6 @@ obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o
 obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
 obj-$(CONFIG_MODULES)           += ppc_ksyms.o
 obj-$(CONFIG_BOOTX_TEXT)        += btext.o
-obj-$(CONFIG_6xx)               += idle_6xx.o
 obj-$(CONFIG_SMP)               += smp.o
 obj-$(CONFIG_KPROBES)           += kprobes.o
 obj-$(CONFIG_PPC_UDBG_16550)    += legacy_serial.o udbg_16550.o
@@ -77,6 +81,7 @@ smpobj-$(CONFIG_SMP) += smp.o
 
 endif
 
+obj-$(CONFIG_PPC32)     += $(obj32-y)
 obj-$(CONFIG_PPC64)     += $(obj64-y)
 
 extra-$(CONFIG_PPC_FPU) += fpu.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 882889b15926..54b48f330051 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -105,8 +105,6 @@ int main(void)
 	DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
 	DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
 	DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
-	DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
-
 	/* paca */
 	DEFINE(PACA_SIZE, sizeof(struct paca_struct));
 	DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
new file mode 100644
index 000000000000..55ed7716636f
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -0,0 +1,474 @@
1/*
2 * This file contains low level CPU setup functions.
3 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 *
10 */
11
12#include <linux/config.h>
13#include <asm/processor.h>
14#include <asm/page.h>
15#include <asm/cputable.h>
16#include <asm/ppc_asm.h>
17#include <asm/asm-offsets.h>
18#include <asm/cache.h>
19
20_GLOBAL(__setup_cpu_603)
21 b setup_common_caches
22_GLOBAL(__setup_cpu_604)
23 mflr r4
24 bl setup_common_caches
25 bl setup_604_hid0
26 mtlr r4
27 blr
28_GLOBAL(__setup_cpu_750)
29 mflr r4
30 bl __init_fpu_registers
31 bl setup_common_caches
32 bl setup_750_7400_hid0
33 mtlr r4
34 blr
35_GLOBAL(__setup_cpu_750cx)
36 mflr r4
37 bl __init_fpu_registers
38 bl setup_common_caches
39 bl setup_750_7400_hid0
40 bl setup_750cx
41 mtlr r4
42 blr
43_GLOBAL(__setup_cpu_750fx)
44 mflr r4
45 bl __init_fpu_registers
46 bl setup_common_caches
47 bl setup_750_7400_hid0
48 bl setup_750fx
49 mtlr r4
50 blr
51_GLOBAL(__setup_cpu_7400)
52 mflr r4
53 bl __init_fpu_registers
54 bl setup_7400_workarounds
55 bl setup_common_caches
56 bl setup_750_7400_hid0
57 mtlr r4
58 blr
59_GLOBAL(__setup_cpu_7410)
60 mflr r4
61 bl __init_fpu_registers
62 bl setup_7410_workarounds
63 bl setup_common_caches
64 bl setup_750_7400_hid0
65 li r3,0
66 mtspr SPRN_L2CR2,r3
67 mtlr r4
68 blr
69_GLOBAL(__setup_cpu_745x)
70 mflr r4
71 bl setup_common_caches
72 bl setup_745x_specifics
73 mtlr r4
74 blr
75
76/* Enable caches for 603's, 604, 750 & 7400 */
77setup_common_caches:
78 mfspr r11,SPRN_HID0
79 andi. r0,r11,HID0_DCE
80 ori r11,r11,HID0_ICE|HID0_DCE
81 ori r8,r11,HID0_ICFI
82 bne 1f /* don't invalidate the D-cache */
83 ori r8,r8,HID0_DCI /* unless it wasn't enabled */
841: sync
85 mtspr SPRN_HID0,r8 /* enable and invalidate caches */
86 sync
87 mtspr SPRN_HID0,r11 /* enable caches */
88 sync
89 isync
90 blr
91
92/* 604, 604e, 604ev, ...
93 * Enable superscalar execution & branch history table
94 */
95setup_604_hid0:
96 mfspr r11,SPRN_HID0
97 ori r11,r11,HID0_SIED|HID0_BHTE
98 ori r8,r11,HID0_BTCD
99 sync
100 mtspr SPRN_HID0,r8 /* flush branch target address cache */
101 sync /* on 604e/604r */
102 mtspr SPRN_HID0,r11
103 sync
104 isync
105 blr
106
107/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
108 * errata we work around here.
109 * Moto MPC710CE.pdf describes them; those are errata
110 * #3, #4 and #5.
111 * Note that we assume the firmware didn't choose to
112 * apply other workarounds (there are other ones documented
113 * in the .pdf). It appears that Apple firmware only works
114 * around #3 and with the same fix we use. We may want to
115 * check if the CPU is using 60x bus mode, in which case
116 * the workaround for errata #4 is useless. Also, we may
117 * want to explicitly clear HID0_NOPDST as this is not
118 * needed once we have applied workaround #5 (though it's
119 * not set by Apple's firmware at least).
120 */
121setup_7400_workarounds:
122 mfpvr r3
123 rlwinm r3,r3,0,20,31
124 cmpwi 0,r3,0x0207
125 ble 1f
126 blr
127setup_7410_workarounds:
128 mfpvr r3
129 rlwinm r3,r3,0,20,31
130 cmpwi 0,r3,0x0100
131 bnelr
1321:
133 mfspr r11,SPRN_MSSSR0
134 /* Errata #3: Set L1OPQ_SIZE to 0x10 */
135 rlwinm r11,r11,0,9,6
136 oris r11,r11,0x0100
137 /* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?) */
138 oris r11,r11,0x0002
139 /* Errata #5: Set DRLT_SIZE to 0x01 */
140 rlwinm r11,r11,0,5,2
141 oris r11,r11,0x0800
142 sync
143 mtspr SPRN_MSSSR0,r11
144 sync
145 isync
146 blr
147
148/* 740/750/7400/7410
149 * Enable Store Gathering (SGE), Address Broadcast (ABE),
150 * Branch History Table (BHTE), Branch Target ICache (BTIC)
151 * Dynamic Power Management (DPM), Speculative (SPD)
152 * Clear Instruction cache throttling (ICTC)
153 */
154setup_750_7400_hid0:
155 mfspr r11,SPRN_HID0
156 ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
157 oris r11,r11,HID0_DPM@h
158BEGIN_FTR_SECTION
159 xori r11,r11,HID0_BTIC
160END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
161BEGIN_FTR_SECTION
162 xoris r11,r11,HID0_DPM@h /* disable dynamic power mgmt */
163END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
164 li r3,HID0_SPD
165 andc r11,r11,r3 /* clear SPD: enable speculative */
166 li r3,0
167 mtspr SPRN_ICTC,r3 /* Instruction Cache Throttling off */
168 isync
169 mtspr SPRN_HID0,r11
170 sync
171 isync
172 blr
173
174/* 750cx specific
175 * Looks like we have to disable NAP feature for some PLL settings...
176 * (waiting for confirmation)
177 */
178setup_750cx:
179 mfspr r10, SPRN_HID1
180 rlwinm r10,r10,4,28,31
181 cmpwi cr0,r10,7
182 cmpwi cr1,r10,9
183 cmpwi cr2,r10,11
184 cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
185 cror 4*cr0+eq,4*cr0+eq,4*cr2+eq
186 bnelr
187 lwz r6,CPU_SPEC_FEATURES(r5)
188 li r7,CPU_FTR_CAN_NAP
189 andc r6,r6,r7
190 stw r6,CPU_SPEC_FEATURES(r5)
191 blr
192
193/* 750fx specific
194 */
195setup_750fx:
196 blr
197
198/* MPC 745x
199 * Enable Store Gathering (SGE), Branch Folding (FOLD)
200 * Branch History Table (BHTE), Branch Target ICache (BTIC)
201 * Dynamic Power Management (DPM), Speculative (SPD)
202 * Ensure our data cache instructions really operate.
203 * Timebase has to be running or we wouldn't have made it here,
204 * just ensure we don't disable it.
205 * Clear Instruction cache throttling (ICTC)
206 * Enable L2 HW prefetch
207 */
208setup_745x_specifics:
209	/* We check for the presence of an L3 cache set up by
210	 * the firmware. If one is present, we disable the NAP
211	 * capability, as it's known to be bogus on rev 2.1 and earlier
212	 */
213 mfspr r11,SPRN_L3CR
214 andis. r11,r11,L3CR_L3E@h
215 beq 1f
216 lwz r6,CPU_SPEC_FEATURES(r5)
217 andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
218 beq 1f
219 li r7,CPU_FTR_CAN_NAP
220 andc r6,r6,r7
221 stw r6,CPU_SPEC_FEATURES(r5)
2221:
223 mfspr r11,SPRN_HID0
224
225 /* All of the bits we have to set.....
226 */
227 ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE
228 ori r11,r11,HID0_LRSTK | HID0_BTIC
229 oris r11,r11,HID0_DPM@h
230BEGIN_FTR_SECTION
231 xori r11,r11,HID0_BTIC
232END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
233BEGIN_FTR_SECTION
234 xoris r11,r11,HID0_DPM@h /* disable dynamic power mgmt */
235END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
236
237 /* All of the bits we have to clear....
238 */
239 li r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
240 andc r11,r11,r3 /* clear SPD: enable speculative */
241 li r3,0
242
243 mtspr SPRN_ICTC,r3 /* Instruction Cache Throttling off */
244 isync
245 mtspr SPRN_HID0,r11
246 sync
247 isync
248
249 /* Enable L2 HW prefetch, if L2 is enabled
250 */
251 mfspr r3,SPRN_L2CR
252 andis. r3,r3,L2CR_L2E@h
253 beqlr
254 mfspr r3,SPRN_MSSCR0
255 ori r3,r3,3
256 sync
257 mtspr SPRN_MSSCR0,r3
258 sync
259 isync
260 blr
261
262/*
263 * Initialize the FPU registers. This is needed to work around an erratum
264 * in some 750 CPUs where using a not-yet-initialized FPU register after
265 * power-on reset may hang the CPU.
266 */
267_GLOBAL(__init_fpu_registers)
268 mfmsr r10
269 ori r11,r10,MSR_FP
270 mtmsr r11
271 isync
272 addis r9,r3,empty_zero_page@ha
273 addi r9,r9,empty_zero_page@l
274 REST_32FPRS(0,r9)
275 sync
276 mtmsr r10
277 isync
278 blr
279
280
281/* Definitions for the table used to save CPU states */
282#define CS_HID0 0
283#define CS_HID1 4
284#define CS_HID2 8
285#define CS_MSSCR0 12
286#define CS_MSSSR0 16
287#define CS_ICTRL 20
288#define CS_LDSTCR 24
289#define CS_LDSTDB 28
290#define CS_SIZE 32
291
292 .data
293 .balign L1_CACHE_BYTES
294cpu_state_storage:
295 .space CS_SIZE
296 .balign L1_CACHE_BYTES,0
297 .text
298
299/* Called in normal context to back up CPU 0 state. This
300 * does not include cache settings. This function is also
301 * called for machine sleep. This does not include the MMU
302 * setup, BATs, etc... but rather the "special" registers
303 * like HID0, HID1, MSSCR0, etc...
304 */
305_GLOBAL(__save_cpu_setup)
306	/* Some CR fields are volatile, we back them all up */
307 mfcr r7
308
309 /* Get storage ptr */
310 lis r5,cpu_state_storage@h
311 ori r5,r5,cpu_state_storage@l
312
313 /* Save HID0 (common to all CONFIG_6xx cpus) */
314 mfspr r3,SPRN_HID0
315 stw r3,CS_HID0(r5)
316
317 /* Now deal with CPU type dependent registers */
318 mfspr r3,SPRN_PVR
319 srwi r3,r3,16
320 cmplwi cr0,r3,0x8000 /* 7450 */
321 cmplwi cr1,r3,0x000c /* 7400 */
322 cmplwi cr2,r3,0x800c /* 7410 */
323 cmplwi cr3,r3,0x8001 /* 7455 */
324 cmplwi cr4,r3,0x8002 /* 7457 */
325 cmplwi cr5,r3,0x8003 /* 7447A */
326 cmplwi cr6,r3,0x7000 /* 750FX */
327 cmplwi cr7,r3,0x8004 /* 7448 */
328 /* cr1 is 7400 || 7410 */
329 cror 4*cr1+eq,4*cr1+eq,4*cr2+eq
330 /* cr0 is 74xx */
331 cror 4*cr0+eq,4*cr0+eq,4*cr3+eq
332 cror 4*cr0+eq,4*cr0+eq,4*cr4+eq
333 cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
334 cror 4*cr0+eq,4*cr0+eq,4*cr5+eq
335 cror 4*cr0+eq,4*cr0+eq,4*cr7+eq
336 bne 1f
337 /* Backup 74xx specific regs */
338 mfspr r4,SPRN_MSSCR0
339 stw r4,CS_MSSCR0(r5)
340 mfspr r4,SPRN_MSSSR0
341 stw r4,CS_MSSSR0(r5)
342 beq cr1,1f
343 /* Backup 745x specific registers */
344 mfspr r4,SPRN_HID1
345 stw r4,CS_HID1(r5)
346 mfspr r4,SPRN_ICTRL
347 stw r4,CS_ICTRL(r5)
348 mfspr r4,SPRN_LDSTCR
349 stw r4,CS_LDSTCR(r5)
350 mfspr r4,SPRN_LDSTDB
351 stw r4,CS_LDSTDB(r5)
3521:
353 bne cr6,1f
354 /* Backup 750FX specific registers */
355 mfspr r4,SPRN_HID1
356 stw r4,CS_HID1(r5)
357 /* If rev 2.x, backup HID2 */
358 mfspr r3,SPRN_PVR
359 andi. r3,r3,0xff00
360 cmpwi cr0,r3,0x0200
361 bne 1f
362 mfspr r4,SPRN_HID2
363 stw r4,CS_HID2(r5)
3641:
365 mtcr r7
366 blr
367
368/* Called with no MMU context (typically MSR:IR/DR off) to
369 * restore CPU state as backed up by the previous
370 * function. This does not include cache setting
371 */
372_GLOBAL(__restore_cpu_setup)
373	/* Some CR fields are volatile, we back them all up */
374 mfcr r7
375
376 /* Get storage ptr */
377 lis r5,(cpu_state_storage-KERNELBASE)@h
378 ori r5,r5,cpu_state_storage@l
379
380 /* Restore HID0 */
381 lwz r3,CS_HID0(r5)
382 sync
383 isync
384 mtspr SPRN_HID0,r3
385 sync
386 isync
387
388 /* Now deal with CPU type dependent registers */
389 mfspr r3,SPRN_PVR
390 srwi r3,r3,16
391 cmplwi cr0,r3,0x8000 /* 7450 */
392 cmplwi cr1,r3,0x000c /* 7400 */
393 cmplwi cr2,r3,0x800c /* 7410 */
394 cmplwi cr3,r3,0x8001 /* 7455 */
395 cmplwi cr4,r3,0x8002 /* 7457 */
396 cmplwi cr5,r3,0x8003 /* 7447A */
397 cmplwi cr6,r3,0x7000 /* 750FX */
398 cmplwi cr7,r3,0x8004 /* 7448 */
399 /* cr1 is 7400 || 7410 */
400 cror 4*cr1+eq,4*cr1+eq,4*cr2+eq
401 /* cr0 is 74xx */
402 cror 4*cr0+eq,4*cr0+eq,4*cr3+eq
403 cror 4*cr0+eq,4*cr0+eq,4*cr4+eq
404 cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
405 cror 4*cr0+eq,4*cr0+eq,4*cr5+eq
406 cror 4*cr0+eq,4*cr0+eq,4*cr7+eq
407 bne 2f
408 /* Restore 74xx specific regs */
409 lwz r4,CS_MSSCR0(r5)
410 sync
411 mtspr SPRN_MSSCR0,r4
412 sync
413 isync
414 lwz r4,CS_MSSSR0(r5)
415 sync
416 mtspr SPRN_MSSSR0,r4
417 sync
418 isync
419 bne cr2,1f
420 /* Clear 7410 L2CR2 */
421 li r4,0
422 mtspr SPRN_L2CR2,r4
4231: beq cr1,2f
424 /* Restore 745x specific registers */
425 lwz r4,CS_HID1(r5)
426 sync
427 mtspr SPRN_HID1,r4
428 isync
429 sync
430 lwz r4,CS_ICTRL(r5)
431 sync
432 mtspr SPRN_ICTRL,r4
433 isync
434 sync
435 lwz r4,CS_LDSTCR(r5)
436 sync
437 mtspr SPRN_LDSTCR,r4
438 isync
439 sync
440 lwz r4,CS_LDSTDB(r5)
441 sync
442 mtspr SPRN_LDSTDB,r4
443 isync
444 sync
4452: bne cr6,1f
446	/* Restore 750FX specific registers, that is: restore HID2
447	 * on rev 2.x, and restore the PLL config & switch to PLL 0
448	 * on all revisions
449 */
450 /* If rev 2.x, restore HID2 with low voltage bit cleared */
451 mfspr r3,SPRN_PVR
452 andi. r3,r3,0xff00
453 cmpwi cr0,r3,0x0200
454 bne 4f
455 lwz r4,CS_HID2(r5)
456 rlwinm r4,r4,0,19,17
457 mtspr SPRN_HID2,r4
458 sync
4594:
460 lwz r4,CS_HID1(r5)
461 rlwinm r5,r4,0,16,14
462 mtspr SPRN_HID1,r5
463 /* Wait for PLL to stabilize */
464 mftbl r5
4653: mftbl r6
466 sub r6,r6,r5
467 cmplwi cr0,r6,10000
468 ble 3b
469 /* Setup final PLL */
470 mtspr SPRN_HID1,r4
4711:
472 mtcr r7
473 blr
474
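For orientation: __save_cpu_setup above runs in normal context and snapshots the "special" registers into cpu_state_storage, while __restore_cpu_setup is invoked very early on the wakeup path with translation off. A minimal C-level sketch of that pairing, assuming a hypothetical platform sleep routine (the example_enter_sleep name is illustrative, not part of this patch):

/* Sketch only: a hypothetical sleep path bracketing the pair above. */
extern void __save_cpu_setup(void);	/* normal context, MMU on */

static void example_enter_sleep(void)
{
	__save_cpu_setup();	/* capture HID0/HID1/MSSCR0/... for CPU 0 */
	/* ... power down; early wakeup asm calls __restore_cpu_setup()
	 * before the MMU is re-enabled, then resumes the kernel ... */
}
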
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 4827ca1ec89b..b3a979467225 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -135,10 +135,10 @@ transfer_to_handler:
 	mfspr	r11,SPRN_HID0
 	mtcr	r11
 BEGIN_FTR_SECTION
-	bt-	8,power_save_6xx_restore	/* Check DOZE */
+	bt-	8,4f			/* Check DOZE */
 END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
 BEGIN_FTR_SECTION
-	bt-	9,power_save_6xx_restore	/* Check NAP */
+	bt-	9,4f			/* Check NAP */
 END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
 #endif /* CONFIG_6xx */
 	.globl transfer_to_handler_cont
@@ -157,6 +157,10 @@ transfer_to_handler_cont:
 	SYNC
 	RFI				/* jump to handler, enable MMU */
 
+#ifdef CONFIG_6xx
+4:	b	power_save_6xx_restore
+#endif
+
 /*
  * On kernel stack overflow, load up an initial stack pointer
  * and call StackOverflow(regs), which should not return.
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 1060155d84c3..19ad5c6b1818 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -617,6 +617,12 @@ _GLOBAL(enter_rtas)
 	mfsrr1	r10
 	std	r10,_SRR1(r1)
 
+	/* Temporary workaround to clear CR until RTAS can be modified to
+	 * ignore all bits.
+	 */
+	li	r0,0
+	mtcr	r0
+
 	/* There is no way it is acceptable to get here with interrupts enabled,
 	 * check it with the asm equivalent of WARN_ON
 	 */
diff --git a/arch/powerpc/kernel/firmware.c b/arch/powerpc/kernel/firmware.c
index 4d37a3cb80f6..0bfe9061720a 100644
--- a/arch/powerpc/kernel/firmware.c
+++ b/arch/powerpc/kernel/firmware.c
@@ -14,7 +14,9 @@
  */
 
 #include <linux/config.h>
+#include <linux/module.h>
 
 #include <asm/firmware.h>
 
-unsigned long ppc64_firmware_features;
+unsigned long powerpc_firmware_features;
+EXPORT_SYMBOL_GPL(powerpc_firmware_features);
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 35084f3a841b..a5ae04a57c78 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -1544,7 +1544,11 @@ _STATIC(__boot_from_prom)
 	mr	r28,r6
 	mr	r27,r7
 
-	/* Align the stack to 16-byte boundary for broken yaboot */
+	/*
+	 * Align the stack to a 16-byte boundary.
+	 * Depending on the size and layout of the ELF sections in the initial
+	 * boot binary, the stack pointer may be unaligned on PowerMac.
+	 */
 	rldicr	r1,r1,0,59
 
 	/* Make sure we are running in 64-bit mode */
@@ -1847,21 +1851,6 @@ _STATIC(start_here_multiplatform)
 	bl	.__save_cpu_setup
 	sync
 
-	/* Setup a valid physical PACA pointer in SPRG3 for early_setup
-	 * note that boot_cpuid can always be 0 nowadays since there is
-	 * nowhere it can be initialized differently before we reach this
-	 * code
-	 */
-	LOAD_REG_IMMEDIATE(r27, boot_cpuid)
-	add	r27,r27,r26
-	lwz	r27,0(r27)
-
-	LOAD_REG_IMMEDIATE(r24, paca)	/* Get base vaddr of paca array	*/
-	mulli	r13,r27,PACA_SIZE	/* Calculate vaddr of right paca */
-	add	r13,r13,r24		/* for this processor.		*/
-	add	r13,r13,r26		/* convert to physical addr	*/
-	mtspr	SPRN_SPRG3,r13
-
 	/* Do very early kernel initializations, including initial hash table,
 	 * stab and slb setup before we turn on relocation. */
 
@@ -1930,6 +1919,17 @@ _STATIC(start_here_common)
 	/* Not reached */
 	BUG_OPCODE
 
+/* Put the paca pointer into r13 and SPRG3 */
+_GLOBAL(setup_boot_paca)
+	LOAD_REG_IMMEDIATE(r3, boot_cpuid)
+	lwz	r3,0(r3)
+	LOAD_REG_IMMEDIATE(r4, paca)	/* Get base vaddr of paca array	*/
+	mulli	r3,r3,PACA_SIZE		/* Calculate vaddr of right paca */
+	add	r13,r3,r4		/* for this processor.		*/
+	mtspr	SPRN_SPRG3,r13
+
+	blr
+
 /*
  * We put a few things here that have to be page-aligned.
  * This stuff goes at the beginning of the bss, which is page-aligned.
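The new setup_boot_paca helper is plain pointer arithmetic over the static paca array; a C rendering of the same computation, as a sketch only (the real code must stay in assembly since it runs before any C environment exists):

/* Sketch: what setup_boot_paca computes before storing into r13/SPRG3 */
extern struct paca_struct paca[];
extern int boot_cpuid;

static inline struct paca_struct *boot_paca(void)
{
	return &paca[boot_cpuid];	/* the mulli/add in the asm above */
}
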
diff --git a/arch/powerpc/kernel/idle_64.c b/arch/powerpc/kernel/idle.c
index b879d3057ef8..e9f321d74d85 100644
--- a/arch/powerpc/kernel/idle_64.c
+++ b/arch/powerpc/kernel/idle.c
@@ -2,13 +2,17 @@
  * Idle daemon for PowerPC. Idle daemon will handle any action
  * that needs to be taken when the system becomes idle.
  *
- * Originally Written by Cort Dougan (cort@cs.nmt.edu)
+ * Originally written by Cort Dougan (cort@cs.nmt.edu).
+ * Subsequent 32-bit hacking by Tom Rini, Armin Kuster,
+ * Paul Mackerras and others.
  *
  * iSeries support added by Mike Corrigan <mikejc@us.ibm.com>
  *
  * Additional shared processor, SMT, and firmware support
  *    Copyright (c) 2003 Dave Engebretsen <engebret@us.ibm.com>
  *
+ * 32-bit and 64-bit versions merged by Paul Mackerras <paulus@samba.org>
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
@@ -29,18 +33,43 @@
 #include <asm/machdep.h>
 #include <asm/smp.h>
 
-extern void power4_idle(void);
+#ifdef CONFIG_HOTPLUG_CPU
+#define cpu_should_die()	(cpu_is_offline(smp_processor_id()) && \
+				 system_state == SYSTEM_RUNNING)
+#else
+#define cpu_should_die()	0
+#endif
 
-void default_idle(void)
+/*
+ * The body of the idle task.
+ */
+void cpu_idle(void)
 {
-	unsigned int cpu = smp_processor_id();
-	set_thread_flag(TIF_POLLING_NRFLAG);
+	if (ppc_md.idle_loop)
+		ppc_md.idle_loop();	/* doesn't return */
 
+	set_thread_flag(TIF_POLLING_NRFLAG);
 	while (1) {
-		if (!need_resched()) {
-			while (!need_resched() && !cpu_is_offline(cpu)) {
-				ppc64_runlatch_off();
+		ppc64_runlatch_off();
 
+		while (!need_resched() && !cpu_should_die()) {
+			if (ppc_md.power_save) {
+				clear_thread_flag(TIF_POLLING_NRFLAG);
+				/*
+				 * smp_mb is so clearing of TIF_POLLING_NRFLAG
+				 * is ordered w.r.t. need_resched() test.
+				 */
+				smp_mb();
+				local_irq_disable();
+
+				/* check again after disabling irqs */
+				if (!need_resched() && !cpu_should_die())
+					ppc_md.power_save();
+
+				local_irq_enable();
+				set_thread_flag(TIF_POLLING_NRFLAG);
+
+			} else {
 				/*
 				 * Go into low thread priority and possibly
 				 * low power mode.
@@ -48,46 +77,18 @@ void default_idle(void)
 				HMT_low();
 				HMT_very_low();
 			}
-
-			HMT_medium();
 		}
 
+		HMT_medium();
 		ppc64_runlatch_on();
+		if (cpu_should_die())
+			cpu_die();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
-		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
-			cpu_die();
 	}
 }
 
-void native_idle(void)
-{
-	while (1) {
-		ppc64_runlatch_off();
-
-		if (!need_resched())
-			power4_idle();
-
-		if (need_resched()) {
-			ppc64_runlatch_on();
-			preempt_enable_no_resched();
-			schedule();
-			preempt_disable();
-		}
-
-		if (cpu_is_offline(smp_processor_id()) &&
-		    system_state == SYSTEM_RUNNING)
-			cpu_die();
-	}
-}
-
-void cpu_idle(void)
-{
-	BUG_ON(NULL == ppc_md.idle_loop);
-	ppc_md.idle_loop();
-}
-
 int powersave_nap;
 
 #ifdef CONFIG_SYSCTL
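The merged cpu_idle() now has two optional machdep hooks: ppc_md.idle_loop replaces the whole loop, and ppc_md.power_save is called once per idle pass with interrupts disabled. A hedged sketch of how a platform might install the latter (my_power_save and my_platform_init are made-up names for illustration):

#include <asm/machdep.h>

/* Hypothetical hook: cpu_idle() calls this with IRQs off and
 * TIF_POLLING_NRFLAG cleared; it returns when an interrupt wakes us. */
static void my_power_save(void)
{
	/* e.g. set MSR_POW / enter nap here */
}

static void __init my_platform_init(void)
{
	ppc_md.power_save = my_power_save;	/* picked up by cpu_idle() */
}
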
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
index 444fdcc769f1..12a4efbaa08f 100644
--- a/arch/powerpc/kernel/idle_6xx.S
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -87,19 +87,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
 	cmpwi	0,r3,0
 	beqlr
 
-	/* Clear MSR:EE */
-	mfmsr	r7
-	rlwinm	r0,r7,0,17,15
-	mtmsr	r0
-
-	/* Check current_thread_info()->flags */
-	rlwinm	r4,r1,0,0,18
-	lwz	r4,TI_FLAGS(r4)
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	beq	1f
-	mtmsr	r7	/* out of line this ? */
-	blr
-1:
 	/* Some pre-nap cleanups needed on some CPUs */
 	andis.	r0,r3,HID0_NAP@h
 	beq	2f
@@ -157,7 +144,8 @@ BEGIN_FTR_SECTION
 	DSSALL
 	sync
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-	ori	r7,r7,MSR_EE	/* Could be omitted (already set) */
+	mfmsr	r7
+	ori	r7,r7,MSR_EE
 	oris	r7,r7,MSR_POW@h
 	sync
 	isync
@@ -220,8 +208,6 @@ _GLOBAL(nap_save_msscr0)
 _GLOBAL(nap_save_hid1)
 	.space	4*NR_CPUS
 
-_GLOBAL(powersave_nap)
-	.long	0
 _GLOBAL(powersave_lowspeed)
 	.long	0
 
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index c16b4afab582..6dad1c02496e 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -1,11 +1,5 @@
 /*
- * This file contains the power_save function for 6xx & 7xxx CPUs
- * rewritten in assembler
- *
- * Warning ! This code assumes that if your machine has a 750fx
- * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
- * if this is not the case some additional changes will have to
- * be done to check a runtime var (a bit like powersave-nap)
+ * This file contains the power_save function for 970-family CPUs.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -26,49 +20,23 @@
 
 	.text
 
-/*
- * Here is the power_save_6xx function. This could eventually be
- * split into several functions & changing the function pointer
- * depending on the various features.
- */
 _GLOBAL(power4_idle)
 BEGIN_FTR_SECTION
 	blr
 END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
-	/* We must dynamically check for the NAP feature as it
-	 * can be cleared by CPU init after the fixups are done
-	 */
-	LOAD_REG_ADDRBASE(r3,cur_cpu_spec)
-	ld	r4,ADDROFF(cur_cpu_spec)(r3)
-	ld	r4,CPU_SPEC_FEATURES(r4)
-	andi.	r0,r4,CPU_FTR_CAN_NAP
-	beqlr
 	/* Now check if user or arch enabled NAP mode */
 	LOAD_REG_ADDRBASE(r3,powersave_nap)
 	lwz	r4,ADDROFF(powersave_nap)(r3)
 	cmpwi	0,r4,0
 	beqlr
 
-	/* Clear MSR:EE */
-	mfmsr	r7
-	li	r4,0
-	ori	r4,r4,MSR_EE
-	andc	r0,r7,r4
-	mtmsrd	r0
-
-	/* Check current_thread_info()->flags */
-	clrrdi	r4,r1,THREAD_SHIFT
-	ld	r4,TI_FLAGS(r4)
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	beq	1f
-	mtmsrd	r7	/* out of line this ? */
-	blr
-1:
 	/* Go to NAP now */
 BEGIN_FTR_SECTION
 	DSSALL
 	sync
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+	mfmsr	r7
+	ori	r7,r7,MSR_EE
 	oris	r7,r7,MSR_POW@h
 	sync
 	isync
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 771a59cbd213..bb5c9501234c 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -379,7 +379,7 @@ void irq_ctx_init(void)
 	struct thread_info *tp;
 	int i;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
 		tp = softirq_ctx[i];
 		tp->cpu = i;
diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S
new file mode 100644
index 000000000000..d7f4e982b539
--- /dev/null
+++ b/arch/powerpc/kernel/l2cr_6xx.S
@@ -0,0 +1,471 @@
1/*
2 L2CR functions
3 Copyright © 1997-1998 by PowerLogix R & D, Inc.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18*/
19/*
20 Thur, Dec. 12, 1998.
21 - First public release, contributed by PowerLogix.
22 ***********
23 Sat, Aug. 7, 1999.
24 - Terry: Made sure code disabled interrupts before running. (Previously
25 it was assumed interrupts were already disabled).
26 - Terry: Updated for tentative G4 support. 4MB of memory is now flushed
27 instead of 2MB. (Prob. only 3 is necessary).
28 - Terry: Updated for workaround to HID0[DPM] processor bug
29 during global invalidates.
30 ***********
31 Thu, July 13, 2000.
32 - Terry: Added isync to correct for an errata.
33
34 22 August 2001.
35 - DanM: Finally added the 7450 patch I've had for the past
36 several months. The L2CR is similar, but I'm going
37 to assume the user of these functions knows what they
38 are doing.
39
40 Author: Terry Greeniaus (tgree@phys.ualberta.ca)
41 Please e-mail updates to this file to me, thanks!
42*/
43#include <linux/config.h>
44#include <asm/processor.h>
45#include <asm/cputable.h>
46#include <asm/ppc_asm.h>
47#include <asm/cache.h>
48#include <asm/page.h>
49
50/* Usage:
51
52 When setting the L2CR register, you must do a few special
53 things. If you are enabling the cache, you must perform a
54 global invalidate. If you are disabling the cache, you must
55 flush the cache contents first. This routine takes care of
56 doing these things. When first enabling the cache, make sure
57 you pass in the L2CR you want, as well as passing in the
58 global invalidate bit set. A global invalidate will only be
59 performed if the L2I bit is set in applyThis. When enabling
60 the cache, you should also set the L2E bit in applyThis. If
61 you want to modify the L2CR contents after the cache has been
62 enabled, the recommended procedure is to first call
63 __setL2CR(0) to disable the cache and then call it again with
64 the new values for L2CR. Examples:
65
66 _setL2CR(0) - disables the cache
67 _setL2CR(0xB3A04000) - enables my G3 upgrade card:
68 - L2E set to turn on the cache
69 - L2SIZ set to 1MB
70 - L2CLK set to 1:1
71 - L2RAM set to pipelined synchronous late-write
72 - L2I set to perform a global invalidation
73 - L2OH set to 0.5 nS
74 - L2DF set because this upgrade card
75 requires it
76
77 A similar call should work for your card. You need to know
78 the correct setting for your card and then place them in the
79 fields I have outlined above. Other fields support optional
80 features, such as L2DO which caches only data, or L2TS which
81 causes cache pushes from the L1 cache to go to the L2 cache
82 instead of to main memory.
83
84IMPORTANT:
85 Starting with the 7450, the bits in this register have moved
86 or behave differently. The Enable, Parity Enable, Size,
87 and L2 Invalidate are the only bits that have not moved.
88 The size is read-only for these processors with internal L2
89 cache, and the invalidate is a control as well as status.
90 -- Dan
91
92*/
93/*
94 * Summary: this procedure ignores the L2I bit in the value passed in,
95 * flushes the cache if it was already enabled, always invalidates the
96 * cache, then enables the cache if the L2E bit is set in the value
97 * passed in.
98 * -- paulus.
99 */
100_GLOBAL(_set_L2CR)
101 /* Make sure this is a 750 or 7400 chip */
102BEGIN_FTR_SECTION
103 li r3,-1
104 blr
105END_FTR_SECTION_IFCLR(CPU_FTR_L2CR)
106
107 mflr r9
108
109 /* Stop DST streams */
110BEGIN_FTR_SECTION
111 DSSALL
112 sync
113END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
114
115 /* Turn off interrupts and data relocation. */
116 mfmsr r7 /* Save MSR in r7 */
117 rlwinm r4,r7,0,17,15
118 rlwinm r4,r4,0,28,26 /* Turn off DR bit */
119 sync
120 mtmsr r4
121 isync
122
123 /* Before we perform the global invalidation, we must disable dynamic
124 * power management via HID0[DPM] to work around a processor bug where
125 * DPM can possibly interfere with the state machine in the processor
126 * that invalidates the L2 cache tags.
127 */
128 mfspr r8,SPRN_HID0 /* Save HID0 in r8 */
129 rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */
130 sync
131 mtspr SPRN_HID0,r4 /* Disable DPM */
132 sync
133
134 /* Get the current enable bit of the L2CR into r4 */
135 mfspr r4,SPRN_L2CR
136
137 /* Tweak some bits */
138 rlwinm r5,r3,0,0,0 /* r5 contains the new enable bit */
139 rlwinm r3,r3,0,11,9 /* Turn off the invalidate bit */
140 rlwinm r3,r3,0,1,31 /* Turn off the enable bit */
141
142 /* Check to see if we need to flush */
143 rlwinm. r4,r4,0,0,0
144 beq 2f
145
146 /* Flush the cache. First, read the first 4MB of memory (physical) to
147 * put new data in the cache. (Actually we only need
148 * the size of the L2 cache plus the size of the L1 cache, but 4MB will
149 * cover everything just to be safe).
150 */
151
152 /**** Might be a good idea to set L2DO here - to prevent instructions
153 from getting into the cache. But since we invalidate
154 the next time we enable the cache it doesn't really matter.
155 Don't do this unless you accommodate all processor variations.
156 The bit moved on the 7450.....
157 ****/
158
159BEGIN_FTR_SECTION
160 /* Disable L2 prefetch on some 745x and try to ensure
161 * L2 prefetch engines are idle. As explained by errata
162 * text, we can't be sure they are, we just hope very hard
163 * that well be enough (sic !). At least I noticed Apple
164 * doesn't even bother doing the dcbf's here...
165 */
166 mfspr r4,SPRN_MSSCR0
167 rlwinm r4,r4,0,0,29
168 sync
169 mtspr SPRN_MSSCR0,r4
170 sync
171 isync
172 lis r4,KERNELBASE@h
173 dcbf 0,r4
174 dcbf 0,r4
175 dcbf 0,r4
176 dcbf 0,r4
177END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
178
179 /* TODO: use HW flush assist when available */
180
181 lis r4,0x0002
182 mtctr r4
183 li r4,0
1841:
185 lwzx r0,r0,r4
186 addi r4,r4,32 /* Go to start of next cache line */
187 bdnz 1b
188 isync
189
190 /* Now, flush the first 4MB of memory */
191 lis r4,0x0002
192 mtctr r4
193 li r4,0
194 sync
1951:
196 dcbf 0,r4
197 addi r4,r4,32 /* Go to start of next cache line */
198 bdnz 1b
199
2002:
201 /* Set up the L2CR configuration bits (and switch L2 off) */
202 /* CPU errata: Make sure the mtspr below is already in the
203 * L1 icache
204 */
205 b 20f
206 .balign L1_CACHE_BYTES
20722:
208 sync
209 mtspr SPRN_L2CR,r3
210 sync
211 b 23f
21220:
213 b 21f
21421: sync
215 isync
216 b 22b
217
21823:
219 /* Perform a global invalidation */
220 oris r3,r3,0x0020
221 sync
222 mtspr SPRN_L2CR,r3
223 sync
224 isync /* For errata */
225
226BEGIN_FTR_SECTION
227 /* On the 7450, we wait for the L2I bit to clear......
228 */
22910: mfspr r3,SPRN_L2CR
230 andis. r4,r3,0x0020
231 bne 10b
232 b 11f
233END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
234
235 /* Wait for the invalidation to complete */
2363: mfspr r3,SPRN_L2CR
237 rlwinm. r4,r3,0,31,31
238 bne 3b
239
24011: rlwinm r3,r3,0,11,9 /* Turn off the L2I bit */
241 sync
242 mtspr SPRN_L2CR,r3
243 sync
244
245 /* See if we need to enable the cache */
246 cmplwi r5,0
247 beq 4f
248
249 /* Enable the cache */
250 oris r3,r3,0x8000
251 mtspr SPRN_L2CR,r3
252 sync
253
254 /* Enable L2 HW prefetch on 744x/745x */
255BEGIN_FTR_SECTION
256 mfspr r3,SPRN_MSSCR0
257 ori r3,r3,3
258 sync
259 mtspr SPRN_MSSCR0,r3
260 sync
261 isync
262END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
2634:
264
265 /* Restore HID0[DPM] to whatever it was before */
266 sync
267 mtspr 1008,r8
268 sync
269
270 /* Restore MSR (restores EE and DR bits to original state) */
271 SYNC
272 mtmsr r7
273 isync
274
275 mtlr r9
276 blr
277
278_GLOBAL(_get_L2CR)
279 /* Return the L2CR contents */
280 li r3,0
281BEGIN_FTR_SECTION
282 mfspr r3,SPRN_L2CR
283END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
284 blr
285
286
287/*
288 * Here is a similar routine for dealing with the L3 cache
289 * on the 745x family of chips
290 */
291
292_GLOBAL(_set_L3CR)
293 /* Make sure this is a 745x chip */
294BEGIN_FTR_SECTION
295 li r3,-1
296 blr
297END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)
298
299 /* Turn off interrupts and data relocation. */
300 mfmsr r7 /* Save MSR in r7 */
301 rlwinm r4,r7,0,17,15
302 rlwinm r4,r4,0,28,26 /* Turn off DR bit */
303 sync
304 mtmsr r4
305 isync
306
307 /* Stop DST streams */
308 DSSALL
309 sync
310
311 /* Get the current enable bit of the L3CR into r4 */
312 mfspr r4,SPRN_L3CR
313
314 /* Tweak some bits */
315 rlwinm r5,r3,0,0,0 /* r5 contains the new enable bit */
316 rlwinm r3,r3,0,22,20 /* Turn off the invalidate bit */
317 rlwinm r3,r3,0,2,31 /* Turn off the enable & PE bits */
318 rlwinm r3,r3,0,5,3 /* Turn off the clken bit */
319 /* Check to see if we need to flush */
320 rlwinm. r4,r4,0,0,0
321 beq 2f
322
323 /* Flush the cache.
324 */
325
326 /* TODO: use HW flush assist */
327
328 lis r4,0x0008
329 mtctr r4
330 li r4,0
3311:
332 lwzx r0,r0,r4
333 dcbf 0,r4
334 addi r4,r4,32 /* Go to start of next cache line */
335 bdnz 1b
336
3372:
338 /* Set up the L3CR configuration bits (and switch L3 off) */
339 sync
340 mtspr SPRN_L3CR,r3
341 sync
342
343 oris r3,r3,L3CR_L3RES@h /* Set reserved bit 5 */
344 mtspr SPRN_L3CR,r3
345 sync
346 oris r3,r3,L3CR_L3CLKEN@h /* Set clken */
347 mtspr SPRN_L3CR,r3
348 sync
349
350 /* Wait for stabilize */
351 li r0,256
352 mtctr r0
3531: bdnz 1b
354
355 /* Perform a global invalidation */
356 ori r3,r3,0x0400
357 sync
358 mtspr SPRN_L3CR,r3
359 sync
360 isync
361
362 /* We wait for the L3I bit to clear...... */
36310: mfspr r3,SPRN_L3CR
364 andi. r4,r3,0x0400
365 bne 10b
366
367 /* Clear CLKEN */
368 rlwinm r3,r3,0,5,3 /* Turn off the clken bit */
369 mtspr SPRN_L3CR,r3
370 sync
371
372 /* Wait for stabilize */
373 li r0,256
374 mtctr r0
3751: bdnz 1b
376
377 /* See if we need to enable the cache */
378 cmplwi r5,0
379 beq 4f
380
381 /* Enable the cache */
382 oris r3,r3,(L3CR_L3E | L3CR_L3CLKEN)@h
383 mtspr SPRN_L3CR,r3
384 sync
385
386 /* Wait for stabilize */
387 li r0,256
388 mtctr r0
3891: bdnz 1b
390
391 /* Restore MSR (restores EE and DR bits to original state) */
3924: SYNC
393 mtmsr r7
394 isync
395 blr
396
397_GLOBAL(_get_L3CR)
398 /* Return the L3CR contents */
399 li r3,0
400BEGIN_FTR_SECTION
401 mfspr r3,SPRN_L3CR
402END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
403 blr
404
405/* --- End of PowerLogix code ---
406 */
407
408
409/* flush_disable_L1() - Flush and disable L1 cache
410 *
411 * clobbers r0, r3, ctr, cr0
412 * Must be called with interrupts disabled and MMU enabled.
413 */
414_GLOBAL(__flush_disable_L1)
415	/* Stop pending altivec streams and memory accesses */
416BEGIN_FTR_SECTION
417 DSSALL
418END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
419 sync
420
421 /* Load counter to 0x4000 cache lines (512k) and
422	 * load the cache with data
423 */
424 li r3,0x4000 /* 512kB / 32B */
425 mtctr r3
426 lis r3,KERNELBASE@h
4271:
428 lwz r0,0(r3)
429 addi r3,r3,0x0020 /* Go to start of next cache line */
430 bdnz 1b
431 isync
432 sync
433
434 /* Now flush those cache lines */
435 li r3,0x4000 /* 512kB / 32B */
436 mtctr r3
437 lis r3,KERNELBASE@h
4381:
439 dcbf 0,r3
440 addi r3,r3,0x0020 /* Go to start of next cache line */
441 bdnz 1b
442 sync
443
444 /* We can now disable the L1 cache (HID0:DCE, HID0:ICE) */
445 mfspr r3,SPRN_HID0
446 rlwinm r3,r3,0,18,15
447 mtspr SPRN_HID0,r3
448 sync
449 isync
450 blr
451
452/* inval_enable_L1 - Invalidate and enable L1 cache
453 *
454 * Assumes L1 is already disabled and MSR:EE is off
455 *
456 * clobbers r3
457 */
458_GLOBAL(__inval_enable_L1)
459	/* Enable and then flash-invalidate the instruction & data caches */
460 mfspr r3,SPRN_HID0
461 ori r3,r3, HID0_ICE|HID0_ICFI|HID0_DCE|HID0_DCI
462 sync
463 isync
464 mtspr SPRN_HID0,r3
465 xori r3,r3, HID0_ICFI|HID0_DCI
466 mtspr SPRN_HID0,r3
467 sync
468
469 blr
470
471
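The usage comment at the top of this file recommends disabling the L2 before reprogramming it; a C-level sketch of that calling sequence, using the example value from the comment (board-specific, not universal), and assuming the usual extern declarations:

extern int _set_L2CR(unsigned long val);
extern unsigned long _get_L2CR(void);

static void example_reprogram_l2(void)
{
	_set_L2CR(0);		/* flush contents, then disable the L2 */
	_set_L2CR(0xB3A04000);	/* re-enable: L2E|L2I plus the size/clock/
				 * RAM-type bits appropriate to the board */
}
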
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index c7a799a09516..6e67b5b49ba1 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -37,7 +37,7 @@ static int legacy_serial_console = -1;
 static int __init add_legacy_port(struct device_node *np, int want_index,
 				  int iotype, phys_addr_t base,
 				  phys_addr_t taddr, unsigned long irq,
-				  unsigned int flags)
+				  upf_t flags)
 {
 	u32 *clk, *spd, clock = BASE_BAUD * 16;
 	int index;
@@ -113,7 +113,7 @@ static int __init add_legacy_soc_port(struct device_node *np,
 {
 	phys_addr_t addr;
 	u32 *addrp;
-	unsigned int flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ;
+	upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ;
 
 	/* We only support ports that have a clock frequency properly
 	 * encoded in the device-tree.
@@ -236,6 +236,23 @@ static int __init add_legacy_pci_port(struct device_node *np,
 }
 #endif
 
+static void __init setup_legacy_serial_console(int console)
+{
+	struct legacy_serial_info *info =
+		&legacy_serial_infos[console];
+	void __iomem *addr;
+
+	if (info->taddr == 0)
+		return;
+	addr = ioremap(info->taddr, 0x1000);
+	if (addr == NULL)
+		return;
+	if (info->speed == 0)
+		info->speed = udbg_probe_uart_speed(addr, info->clock);
+	DBG("default console speed = %d\n", info->speed);
+	udbg_init_uart(addr, info->speed, info->clock);
+}
+
 /*
  * This is called very early, as part of setup_system() or eventually
  * setup_arch(), basically before anything else in this file. This function
@@ -318,25 +335,8 @@ void __init find_legacy_serial_ports(void)
 #endif
 
 	DBG("legacy_serial_console = %d\n", legacy_serial_console);
-
-	/* udbg is 64 bits only for now, that will change soon though ... */
-	while (legacy_serial_console >= 0) {
-		struct legacy_serial_info *info =
-			&legacy_serial_infos[legacy_serial_console];
-		void __iomem *addr;
-
-		if (info->taddr == 0)
-			break;
-		addr = ioremap(info->taddr, 0x1000);
-		if (addr == NULL)
-			break;
-		if (info->speed == 0)
-			info->speed = udbg_probe_uart_speed(addr, info->clock);
-		DBG("default console speed = %d\n", info->speed);
-		udbg_init_uart(addr, info->speed, info->clock);
-		break;
-	}
-
+	if (legacy_serial_console >= 0)
+		setup_legacy_serial_console(legacy_serial_console);
 	DBG(" <- find_legacy_serial_port()\n");
 }
 
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index e789fef4eb8a..1b73508ecb2b 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -56,7 +56,7 @@ static unsigned long get_purr(void)
 	unsigned long sum_purr = 0;
 	int cpu;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		sum_purr += lppaca[cpu].emulated_time_base;
 
 #ifdef PURR_DEBUG
@@ -222,7 +222,7 @@ static unsigned long get_purr(void)
 	int cpu;
 	struct cpu_usage *cu;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		cu = &per_cpu(cpu_usage_array, cpu);
 		sum_purr += cu->current_tb;
 	}
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
new file mode 100644
index 000000000000..92f4e5f64f02
--- /dev/null
+++ b/arch/powerpc/kernel/module_32.c
@@ -0,0 +1,320 @@
1/* Kernel module help for PPC.
2 Copyright (C) 2001 Rusty Russell.
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17*/
18#include <linux/module.h>
19#include <linux/moduleloader.h>
20#include <linux/elf.h>
21#include <linux/vmalloc.h>
22#include <linux/fs.h>
23#include <linux/string.h>
24#include <linux/kernel.h>
25#include <linux/cache.h>
26
27#if 0
28#define DEBUGP printk
29#else
30#define DEBUGP(fmt , ...)
31#endif
32
33LIST_HEAD(module_bug_list);
34
35void *module_alloc(unsigned long size)
36{
37 if (size == 0)
38 return NULL;
39 return vmalloc(size);
40}
41
42/* Free memory returned from module_alloc */
43void module_free(struct module *mod, void *module_region)
44{
45 vfree(module_region);
46 /* FIXME: If module_region == mod->init_region, trim exception
47 table entries. */
48}
49
50/* Count how many different relocations (different symbol, different
51 addend) */
52static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
53{
54 unsigned int i, j, ret = 0;
55
56 /* Sure, this is order(n^2), but it's usually short, and not
57 time critical */
58 for (i = 0; i < num; i++) {
59 for (j = 0; j < i; j++) {
60 /* If this addend appeared before, it's
61 already been counted */
62 if (ELF32_R_SYM(rela[i].r_info)
63 == ELF32_R_SYM(rela[j].r_info)
64 && rela[i].r_addend == rela[j].r_addend)
65 break;
66 }
67 if (j == i) ret++;
68 }
69 return ret;
70}
71
72/* Get the potential trampoline size required for the init and
73 non-init sections */
74static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
75 const Elf32_Shdr *sechdrs,
76 const char *secstrings,
77 int is_init)
78{
79 unsigned long ret = 0;
80 unsigned i;
81
82 /* Everything marked ALLOC (this includes the exported
83 symbols) */
84 for (i = 1; i < hdr->e_shnum; i++) {
85 /* If it's called *.init*, and we're not init, we're
86 not interested */
87 if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0)
88 != is_init)
89 continue;
90
91 /* We don't want to look at debug sections. */
92 if (strstr(secstrings + sechdrs[i].sh_name, ".debug") != 0)
93 continue;
94
95 if (sechdrs[i].sh_type == SHT_RELA) {
96 DEBUGP("Found relocations in section %u\n", i);
97 DEBUGP("Ptr: %p. Number: %u\n",
98 (void *)hdr + sechdrs[i].sh_offset,
99 sechdrs[i].sh_size / sizeof(Elf32_Rela));
100 ret += count_relocs((void *)hdr
101 + sechdrs[i].sh_offset,
102 sechdrs[i].sh_size
103 / sizeof(Elf32_Rela))
104 * sizeof(struct ppc_plt_entry);
105 }
106 }
107
108 return ret;
109}
110
111int module_frob_arch_sections(Elf32_Ehdr *hdr,
112 Elf32_Shdr *sechdrs,
113 char *secstrings,
114 struct module *me)
115{
116 unsigned int i;
117
118 /* Find .plt and .init.plt sections */
119 for (i = 0; i < hdr->e_shnum; i++) {
120 if (strcmp(secstrings + sechdrs[i].sh_name, ".init.plt") == 0)
121 me->arch.init_plt_section = i;
122 else if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0)
123 me->arch.core_plt_section = i;
124 }
125 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
126 printk("Module doesn't contain .plt or .init.plt sections.\n");
127 return -ENOEXEC;
128 }
129
130 /* Override their sizes */
131 sechdrs[me->arch.core_plt_section].sh_size
132 = get_plt_size(hdr, sechdrs, secstrings, 0);
133 sechdrs[me->arch.init_plt_section].sh_size
134 = get_plt_size(hdr, sechdrs, secstrings, 1);
135 return 0;
136}
137
138int apply_relocate(Elf32_Shdr *sechdrs,
139 const char *strtab,
140 unsigned int symindex,
141 unsigned int relsec,
142 struct module *module)
143{
144 printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n",
145 module->name);
146 return -ENOEXEC;
147}
148
149static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
150{
151 if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16)
152 && entry->jump[1] == 0x396b0000 + (val & 0xffff))
153 return 1;
154 return 0;
155}
156
157/* Set up a trampoline in the PLT to bounce us to the distant function */
158static uint32_t do_plt_call(void *location,
159 Elf32_Addr val,
160 Elf32_Shdr *sechdrs,
161 struct module *mod)
162{
163 struct ppc_plt_entry *entry;
164
165 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
166 /* Init, or core PLT? */
167 if (location >= mod->module_core
168 && location < mod->module_core + mod->core_size)
169 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
170 else
171 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
172
173 /* Find this entry, or if that fails, the next avail. entry */
174 while (entry->jump[0]) {
175 if (entry_matches(entry, val)) return (uint32_t)entry;
176 entry++;
177 }
178
179 /* Stolen from Paul Mackerras as well... */
180 entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */
181 entry->jump[1] = 0x396b0000 + (val&0xffff); /* addi r11,r11,sym@l*/
182 entry->jump[2] = 0x7d6903a6; /* mtctr r11 */
183 entry->jump[3] = 0x4e800420; /* bctr */
184
185 DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
186 return (uint32_t)entry;
187}
188
189int apply_relocate_add(Elf32_Shdr *sechdrs,
190 const char *strtab,
191 unsigned int symindex,
192 unsigned int relsec,
193 struct module *module)
194{
195 unsigned int i;
196 Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
197 Elf32_Sym *sym;
198 uint32_t *location;
199 uint32_t value;
200
201 DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
202 sechdrs[relsec].sh_info);
203 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
204 /* This is where to make the change */
205 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
206 + rela[i].r_offset;
207 /* This is the symbol it is referring to. Note that all
208 undefined symbols have been resolved. */
209 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
210 + ELF32_R_SYM(rela[i].r_info);
211 /* `Everything is relative'. */
212 value = sym->st_value + rela[i].r_addend;
213
214 switch (ELF32_R_TYPE(rela[i].r_info)) {
215 case R_PPC_ADDR32:
216 /* Simply set it */
217 *(uint32_t *)location = value;
218 break;
219
220 case R_PPC_ADDR16_LO:
221 /* Low half of the symbol */
222 *(uint16_t *)location = value;
223 break;
224
225 case R_PPC_ADDR16_HA:
226 /* Sign-adjusted lower 16 bits: PPC ELF ABI says:
227 (((x >> 16) + ((x & 0x8000) ? 1 : 0))) & 0xFFFF.
228 This is the same, only sane.
229 */
230 *(uint16_t *)location = (value + 0x8000) >> 16;
231 break;
232
233 case R_PPC_REL24:
234 if ((int)(value - (uint32_t)location) < -0x02000000
235 || (int)(value - (uint32_t)location) >= 0x02000000)
236 value = do_plt_call(location, value,
237 sechdrs, module);
238
239 /* Only replace bits 2 through 26 */
240 DEBUGP("REL24 value = %08X. location = %08X\n",
241 value, (uint32_t)location);
242 DEBUGP("Location before: %08X.\n",
243 *(uint32_t *)location);
244 *(uint32_t *)location
245 = (*(uint32_t *)location & ~0x03fffffc)
246 | ((value - (uint32_t)location)
247 & 0x03fffffc);
248 DEBUGP("Location after: %08X.\n",
249 *(uint32_t *)location);
250 DEBUGP("ie. jump to %08X+%08X = %08X\n",
251 *(uint32_t *)location & 0x03fffffc,
252 (uint32_t)location,
253 (*(uint32_t *)location & 0x03fffffc)
254 + (uint32_t)location);
255 break;
256
257 case R_PPC_REL32:
258 /* 32-bit relative jump. */
259 *(uint32_t *)location = value - (uint32_t)location;
260 break;
261
262 default:
263 printk("%s: unknown ADD relocation: %u\n",
264 module->name,
265 ELF32_R_TYPE(rela[i].r_info));
266 return -ENOEXEC;
267 }
268 }
269 return 0;
270}
271
272int module_finalize(const Elf_Ehdr *hdr,
273 const Elf_Shdr *sechdrs,
274 struct module *me)
275{
276 char *secstrings;
277 unsigned int i;
278
279 me->arch.bug_table = NULL;
280 me->arch.num_bugs = 0;
281
282 /* Find the __bug_table section, if present */
283 secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
284 for (i = 1; i < hdr->e_shnum; i++) {
285 if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
286 continue;
287 me->arch.bug_table = (void *) sechdrs[i].sh_addr;
288 me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
289 break;
290 }
291
292 /*
293 * Strictly speaking this should have a spinlock to protect against
294 * traversals, but since we only traverse on BUG()s, a spinlock
295 * could potentially lead to deadlock and thus be counter-productive.
296 */
297 list_add(&me->arch.bug_list, &module_bug_list);
298
299 return 0;
300}
301
302void module_arch_cleanup(struct module *mod)
303{
304 list_del(&mod->arch.bug_list);
305}
306
307struct bug_entry *module_find_bug(unsigned long bugaddr)
308{
309 struct mod_arch_specific *mod;
310 unsigned int i;
311 struct bug_entry *bug;
312
313 list_for_each_entry(mod, &module_bug_list, bug_list) {
314 bug = mod->bug_table;
315 for (i = 0; i < mod->num_bugs; ++i, ++bug)
316 if (bugaddr == bug->bug_addr)
317 return bug;
318 }
319 return NULL;
320}
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index fd7db8d542db..ada50aa5b600 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -160,7 +160,7 @@ static int dev_nvram_ioctl(struct inode *inode, struct file *file,
160 case IOC_NVRAM_GET_OFFSET: { 160 case IOC_NVRAM_GET_OFFSET: {
161 int part, offset; 161 int part, offset;
162 162
163 if (_machine != PLATFORM_POWERMAC) 163 if (!machine_is(powermac))
164 return -EINVAL; 164 return -EINVAL;
165 if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0) 165 if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0)
166 return -EFAULT; 166 return -EFAULT;
@@ -174,8 +174,9 @@ static int dev_nvram_ioctl(struct inode *inode, struct file *file,
174 return 0; 174 return 0;
175 } 175 }
176#endif /* CONFIG_PPC_PMAC */ 176#endif /* CONFIG_PPC_PMAC */
177 default:
178 return -EINVAL;
177 } 179 }
178 return -EINVAL;
179} 180}
180 181
181struct file_operations nvram_fops = { 182struct file_operations nvram_fops = {
@@ -443,7 +444,7 @@ static int nvram_setup_partition(void)
443 * in our nvram, as Apple defined partitions use pretty much 444 * in our nvram, as Apple defined partitions use pretty much
444 * all of the space 445 * all of the space
445 */ 446 */
446 if (_machine == PLATFORM_POWERMAC) 447 if (machine_is(powermac))
447 return -ENOSPC; 448 return -ENOSPC;
448 449
449 /* see if we have an OS partition that meets our needs. 450 /* see if we have an OS partition that meets our needs.
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 5d1b708086bd..f505a8827e3e 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -56,14 +56,11 @@ struct lppaca lppaca[] = {
56 * processors. The processor VPD array needs one entry per physical 56 * processors. The processor VPD array needs one entry per physical
57 * processor (not thread). 57 * processor (not thread).
58 */ 58 */
59#define PACA_INIT_COMMON(number, start, asrr, asrv) \ 59#define PACA_INIT_COMMON(number) \
60 .lppaca_ptr = &lppaca[number], \ 60 .lppaca_ptr = &lppaca[number], \
61 .lock_token = 0x8000, \ 61 .lock_token = 0x8000, \
62 .paca_index = (number), /* Paca Index */ \ 62 .paca_index = (number), /* Paca Index */ \
63 .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \ 63 .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \
64 .stab_real = (asrr), /* Real pointer to segment table */ \
65 .stab_addr = (asrv), /* Virt pointer to segment table */ \
66 .cpu_start = (start), /* Processor start */ \
67 .hw_cpu_id = 0xffff, 64 .hw_cpu_id = 0xffff,
68 65
69#ifdef CONFIG_PPC_ISERIES 66#ifdef CONFIG_PPC_ISERIES
@@ -72,30 +69,20 @@ struct lppaca lppaca[] = {
72 69
73#define PACA_INIT(number) \ 70#define PACA_INIT(number) \
74{ \ 71{ \
75 PACA_INIT_COMMON(number, 0, 0, 0) \ 72 PACA_INIT_COMMON(number) \
76 PACA_INIT_ISERIES(number) \
77}
78
79#define BOOTCPU_PACA_INIT(number) \
80{ \
81 PACA_INIT_COMMON(number, 1, 0, (u64)&initial_stab) \
82 PACA_INIT_ISERIES(number) \ 73 PACA_INIT_ISERIES(number) \
83} 74}
84 75
85#else 76#else
86#define PACA_INIT(number) \ 77#define PACA_INIT(number) \
87{ \ 78{ \
88 PACA_INIT_COMMON(number, 0, 0, 0) \ 79 PACA_INIT_COMMON(number) \
89} 80}
90 81
91#define BOOTCPU_PACA_INIT(number) \
92{ \
93 PACA_INIT_COMMON(number, 1, STAB0_PHYS_ADDR, (u64)&initial_stab) \
94}
95#endif 82#endif
96 83
97struct paca_struct paca[] = { 84struct paca_struct paca[] = {
98 BOOTCPU_PACA_INIT(0), 85 PACA_INIT(0),
99#if NR_CPUS > 1 86#if NR_CPUS > 1
100 PACA_INIT( 1), PACA_INIT( 2), PACA_INIT( 3), 87 PACA_INIT( 1), PACA_INIT( 2), PACA_INIT( 3),
101#if NR_CPUS > 4 88#if NR_CPUS > 4
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 704c846b2b0f..b129d2e4b759 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -787,7 +787,7 @@ pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
787 * fix has to be done by making the remapping per-host and always 787 * fix has to be done by making the remapping per-host and always
788 * filling the pci_to_OF map. --BenH 788 * filling the pci_to_OF map. --BenH
789 */ 789 */
790 if (_machine == _MACH_Pmac && busnr >= 0xf0) 790 if (machine_is(powermac) && busnr >= 0xf0)
791 busnr -= 0xf0; 791 busnr -= 0xf0;
792 else 792 else
793#endif 793#endif
@@ -1728,7 +1728,7 @@ long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
1728 * (bus 0 is HT root), we return the AGP one instead. 1728 * (bus 0 is HT root), we return the AGP one instead.
1729 */ 1729 */
1730#ifdef CONFIG_PPC_PMAC 1730#ifdef CONFIG_PPC_PMAC
1731 if (_machine == _MACH_Pmac && machine_is_compatible("MacRISC4")) 1731 if (machine_is(powermac) && machine_is_compatible("MacRISC4"))
1732 if (bus == 0) 1732 if (bus == 0)
1733 bus = 0xf0; 1733 bus = 0xf0;
1734#endif /* CONFIG_PPC_PMAC */ 1734#endif /* CONFIG_PPC_PMAC */
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index ba92bab7cc2c..4c4449be81ce 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -78,6 +78,7 @@ int global_phb_number; /* Global phb counter */
78 78
79/* Cached ISA bridge dev. */ 79/* Cached ISA bridge dev. */
80struct pci_dev *ppc64_isabridge_dev = NULL; 80struct pci_dev *ppc64_isabridge_dev = NULL;
81EXPORT_SYMBOL_GPL(ppc64_isabridge_dev);
81 82
82static void fixup_broken_pcnet32(struct pci_dev* dev) 83static void fixup_broken_pcnet32(struct pci_dev* dev)
83{ 84{
diff --git a/arch/powerpc/kernel/perfmon_fsl_booke.c b/arch/powerpc/kernel/perfmon_fsl_booke.c
new file mode 100644
index 000000000000..32455dfcc36b
--- /dev/null
+++ b/arch/powerpc/kernel/perfmon_fsl_booke.c
@@ -0,0 +1,222 @@
 1/* arch/powerpc/kernel/perfmon_fsl_booke.c
2 * Freescale Book-E Performance Monitor code
3 *
4 * Author: Andy Fleming
5 * Copyright (c) 2004 Freescale Semiconductor, Inc
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/kernel.h>
16#include <linux/mm.h>
17#include <linux/stddef.h>
18#include <linux/unistd.h>
19#include <linux/ptrace.h>
20#include <linux/slab.h>
21#include <linux/user.h>
22#include <linux/a.out.h>
23#include <linux/interrupt.h>
24#include <linux/config.h>
25#include <linux/init.h>
26#include <linux/module.h>
27#include <linux/prctl.h>
28
29#include <asm/pgtable.h>
30#include <asm/uaccess.h>
31#include <asm/system.h>
32#include <asm/io.h>
33#include <asm/reg.h>
34#include <asm/xmon.h>
35#include <asm/pmc.h>
36
37static inline u32 get_pmlca(int ctr);
38static inline void set_pmlca(int ctr, u32 pmlca);
39
40static inline u32 get_pmlca(int ctr)
41{
42 u32 pmlca;
43
44 switch (ctr) {
45 case 0:
46 pmlca = mfpmr(PMRN_PMLCA0);
47 break;
48 case 1:
49 pmlca = mfpmr(PMRN_PMLCA1);
50 break;
51 case 2:
52 pmlca = mfpmr(PMRN_PMLCA2);
53 break;
54 case 3:
55 pmlca = mfpmr(PMRN_PMLCA3);
56 break;
57 default:
58 panic("Bad ctr number\n");
59 }
60
61 return pmlca;
62}
63
64static inline void set_pmlca(int ctr, u32 pmlca)
65{
66 switch (ctr) {
67 case 0:
68 mtpmr(PMRN_PMLCA0, pmlca);
69 break;
70 case 1:
71 mtpmr(PMRN_PMLCA1, pmlca);
72 break;
73 case 2:
74 mtpmr(PMRN_PMLCA2, pmlca);
75 break;
76 case 3:
77 mtpmr(PMRN_PMLCA3, pmlca);
78 break;
79 default:
80 panic("Bad ctr number\n");
81 }
82}
83
84void init_pmc_stop(int ctr)
85{
86 u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
87 PMLCA_FCM1 | PMLCA_FCM0);
88 u32 pmlcb = 0;
89
90 switch (ctr) {
91 case 0:
92 mtpmr(PMRN_PMLCA0, pmlca);
93 mtpmr(PMRN_PMLCB0, pmlcb);
94 break;
95 case 1:
96 mtpmr(PMRN_PMLCA1, pmlca);
97 mtpmr(PMRN_PMLCB1, pmlcb);
98 break;
99 case 2:
100 mtpmr(PMRN_PMLCA2, pmlca);
101 mtpmr(PMRN_PMLCB2, pmlcb);
102 break;
103 case 3:
104 mtpmr(PMRN_PMLCA3, pmlca);
105 mtpmr(PMRN_PMLCB3, pmlcb);
106 break;
107 default:
108 panic("Bad ctr number!\n");
109 }
110}
111
112void set_pmc_event(int ctr, int event)
113{
114 u32 pmlca;
115
116 pmlca = get_pmlca(ctr);
117
118 pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
119 ((event << PMLCA_EVENT_SHIFT) &
120 PMLCA_EVENT_MASK);
121
122 set_pmlca(ctr, pmlca);
123}
124
125void set_pmc_user_kernel(int ctr, int user, int kernel)
126{
127 u32 pmlca;
128
129 pmlca = get_pmlca(ctr);
130
131	if (user)
132 pmlca &= ~PMLCA_FCU;
133 else
134 pmlca |= PMLCA_FCU;
135
136	if (kernel)
137 pmlca &= ~PMLCA_FCS;
138 else
139 pmlca |= PMLCA_FCS;
140
141 set_pmlca(ctr, pmlca);
142}
143
144void set_pmc_marked(int ctr, int mark0, int mark1)
145{
146 u32 pmlca = get_pmlca(ctr);
147
148	if (mark0)
149 pmlca &= ~PMLCA_FCM0;
150 else
151 pmlca |= PMLCA_FCM0;
152
153	if (mark1)
154 pmlca &= ~PMLCA_FCM1;
155 else
156 pmlca |= PMLCA_FCM1;
157
158 set_pmlca(ctr, pmlca);
159}
160
161void pmc_start_ctr(int ctr, int enable)
162{
163 u32 pmlca = get_pmlca(ctr);
164
165 pmlca &= ~PMLCA_FC;
166
167 if (enable)
168 pmlca |= PMLCA_CE;
169 else
170 pmlca &= ~PMLCA_CE;
171
172 set_pmlca(ctr, pmlca);
173}
174
175void pmc_start_ctrs(int enable)
176{
177 u32 pmgc0 = mfpmr(PMRN_PMGC0);
178
179 pmgc0 &= ~PMGC0_FAC;
180 pmgc0 |= PMGC0_FCECE;
181
182 if (enable)
183 pmgc0 |= PMGC0_PMIE;
184 else
185 pmgc0 &= ~PMGC0_PMIE;
186
187 mtpmr(PMRN_PMGC0, pmgc0);
188}
189
190void pmc_stop_ctrs(void)
191{
192 u32 pmgc0 = mfpmr(PMRN_PMGC0);
193
194 pmgc0 |= PMGC0_FAC;
195
196 pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);
197
198 mtpmr(PMRN_PMGC0, pmgc0);
199}
200
201void dump_pmcs(void)
202{
203 printk("pmgc0: %x\n", mfpmr(PMRN_PMGC0));
204 printk("pmc\t\tpmlca\t\tpmlcb\n");
205 printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC0),
206 mfpmr(PMRN_PMLCA0), mfpmr(PMRN_PMLCB0));
207 printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC1),
208 mfpmr(PMRN_PMLCA1), mfpmr(PMRN_PMLCB1));
209 printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC2),
210 mfpmr(PMRN_PMLCA2), mfpmr(PMRN_PMLCB2));
211 printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC3),
212 mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3));
213}
214
215EXPORT_SYMBOL(init_pmc_stop);
216EXPORT_SYMBOL(set_pmc_event);
217EXPORT_SYMBOL(set_pmc_user_kernel);
218EXPORT_SYMBOL(set_pmc_marked);
219EXPORT_SYMBOL(pmc_start_ctr);
220EXPORT_SYMBOL(pmc_start_ctrs);
221EXPORT_SYMBOL(pmc_stop_ctrs);
222EXPORT_SYMBOL(dump_pmcs);
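
Taken together, the exported helpers form a small counter-programming API. A hypothetical caller might string them together as below; the counter number 0 and event number 1 are placeholders, not values defined by this file, and a real user would take the prototypes from <asm/pmc.h>:

	/* Hypothetical usage sketch of the Freescale Book-E PMC API
	 * above; counter and event numbers are placeholders. */
	static void example_count_user_events(void)
	{
		init_pmc_stop(0);		/* freeze ctr 0, clear PMLCB0 */
		set_pmc_event(0, 1);		/* select event 1 (placeholder) */
		set_pmc_user_kernel(0, 1, 0);	/* count user mode only */
		set_pmc_marked(0, 1, 1);	/* count regardless of mark bit */
		pmc_start_ctr(0, 0);		/* unfreeze ctr 0, no overflow irq */
		pmc_start_ctrs(0);		/* global unfreeze, PMIE off */

		/* ... run the workload to be measured ... */

		pmc_stop_ctrs();
		dump_pmcs();
	}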
diff --git a/arch/powerpc/kernel/proc_ppc64.c b/arch/powerpc/kernel/proc_ppc64.c
index 7ba42a405f41..3c2cf661f6d9 100644
--- a/arch/powerpc/kernel/proc_ppc64.c
+++ b/arch/powerpc/kernel/proc_ppc64.c
@@ -23,6 +23,7 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25 25
26#include <asm/machdep.h>
26#include <asm/vdso_datapage.h> 27#include <asm/vdso_datapage.h>
27#include <asm/rtas.h> 28#include <asm/rtas.h>
28#include <asm/uaccess.h> 29#include <asm/uaccess.h>
@@ -51,7 +52,7 @@ static int __init proc_ppc64_create(void)
51 if (!root) 52 if (!root)
52 return 1; 53 return 1;
53 54
54 if (!(platform_is_pseries() || _machine == PLATFORM_CELL)) 55 if (!machine_is(pseries) && !machine_is(cell))
55 return 0; 56 return 0;
56 57
57 if (!proc_mkdir("rtas", root)) 58 if (!proc_mkdir("rtas", root))
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index f698aa77127e..706090c99f47 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -45,6 +45,7 @@
45#include <asm/prom.h> 45#include <asm/prom.h>
46#include <asm/machdep.h> 46#include <asm/machdep.h>
47#include <asm/time.h> 47#include <asm/time.h>
48#include <asm/syscalls.h>
48#ifdef CONFIG_PPC64 49#ifdef CONFIG_PPC64
49#include <asm/firmware.h> 50#include <asm/firmware.h>
50#endif 51#endif
@@ -362,7 +363,11 @@ static void show_instructions(struct pt_regs *regs)
362 if (!(i % 8)) 363 if (!(i % 8))
363 printk("\n"); 364 printk("\n");
364 365
365 if (BAD_PC(pc) || __get_user(instr, (unsigned int *)pc)) { 366 /* We use __get_user here *only* to avoid an OOPS on a
367 * bad address because the pc *should* only be a
368 * kernel address.
369 */
370 if (BAD_PC(pc) || __get_user(instr, (unsigned int __user *)pc)) {
366 printk("XXXXXXXX "); 371 printk("XXXXXXXX ");
367 } else { 372 } else {
368 if (regs->nip == pc) 373 if (regs->nip == pc)
@@ -765,7 +770,7 @@ out:
765 return error; 770 return error;
766} 771}
767 772
768static int validate_sp(unsigned long sp, struct task_struct *p, 773int validate_sp(unsigned long sp, struct task_struct *p,
769 unsigned long nbytes) 774 unsigned long nbytes)
770{ 775{
771 unsigned long stack_page = (unsigned long)task_stack_page(p); 776 unsigned long stack_page = (unsigned long)task_stack_page(p);
@@ -803,6 +808,8 @@ static int validate_sp(unsigned long sp, struct task_struct *p,
803#define FRAME_MARKER 2 808#define FRAME_MARKER 2
804#endif 809#endif
805 810
811EXPORT_SYMBOL(validate_sp);
812
806unsigned long get_wchan(struct task_struct *p) 813unsigned long get_wchan(struct task_struct *p)
807{ 814{
808 unsigned long ip, sp; 815 unsigned long ip, sp;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index d63cd562d9d5..4336390bcf34 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -383,14 +383,14 @@ static int __devinit finish_node_interrupts(struct device_node *np,
383 /* Apple uses bits in there in a different way, let's 383 /* Apple uses bits in there in a different way, let's
384 * only keep the real sense bit on macs 384 * only keep the real sense bit on macs
385 */ 385 */
386 if (_machine == PLATFORM_POWERMAC) 386 if (machine_is(powermac))
387 sense &= 0x1; 387 sense &= 0x1;
388 np->intrs[intrcount].sense = map_mpic_senses[sense]; 388 np->intrs[intrcount].sense = map_mpic_senses[sense];
389 } 389 }
390 390
391#ifdef CONFIG_PPC64 391#ifdef CONFIG_PPC64
392 /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */ 392 /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
393 if (_machine == PLATFORM_POWERMAC && ic && ic->parent) { 393 if (machine_is(powermac) && ic && ic->parent) {
394 char *name = get_property(ic->parent, "name", NULL); 394 char *name = get_property(ic->parent, "name", NULL);
395 if (name && !strcmp(name, "u3")) 395 if (name && !strcmp(name, "u3"))
396 np->intrs[intrcount].line += 128; 396 np->intrs[intrcount].line += 128;
@@ -570,6 +570,18 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node,
570 return rc; 570 return rc;
571} 571}
572 572
573unsigned long __init of_get_flat_dt_root(void)
574{
575 unsigned long p = ((unsigned long)initial_boot_params) +
576 initial_boot_params->off_dt_struct;
577
578	while (*((u32 *)p) == OF_DT_NOP)
579 p += 4;
580	BUG_ON(*((u32 *)p) != OF_DT_BEGIN_NODE);
581 p += 4;
582 return _ALIGN(p + strlen((char *)p) + 1, 4);
583}
584
573/** 585/**
574 * This function can be used within scan_flattened_dt callback to get 586 * This function can be used within scan_flattened_dt callback to get
575 * access to properties 587 * access to properties
@@ -612,6 +624,25 @@ void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
612 } while(1); 624 } while(1);
613} 625}
614 626
627int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
628{
629	const char *cp;
630 unsigned long cplen, l;
631
632 cp = of_get_flat_dt_prop(node, "compatible", &cplen);
633 if (cp == NULL)
634 return 0;
635 while (cplen > 0) {
636 if (strncasecmp(cp, compat, strlen(compat)) == 0)
637 return 1;
638 l = strlen(cp) + 1;
639 cp += l;
640 cplen -= l;
641 }
642
643 return 0;
644}
645
615static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size, 646static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
616 unsigned long align) 647 unsigned long align)
617{ 648{
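
The two new helpers, of_get_flat_dt_root() and of_flat_dt_is_compatible(), make the common probe pattern short: take the root node, then match against its compatible list. A sketch of a flat-tree scan callback using them ("example,board" is a placeholder string; as of this version of the scanner, a nonzero return is what stops of_scan_flat_dt()):

	/* Hedged sketch of the probe pattern enabled by the helpers
	 * above; the compatible string is a placeholder. */
	static int __init early_find_example(unsigned long node,
					     const char *uname, int depth,
					     void *data)
	{
		if (depth != 0)		/* only look at the root node */
			return 0;
		if (!of_flat_dt_is_compatible(node, "example,board"))
			return 0;
		return 1;		/* nonzero terminates the scan */
	}

	/* caller:  if (of_scan_flat_dt(early_find_example, NULL)) ... */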
@@ -686,7 +717,7 @@ static unsigned long __init unflatten_dt_node(unsigned long mem,
686#ifdef DEBUG 717#ifdef DEBUG
687 if ((strlen(p) + l + 1) != allocl) { 718 if ((strlen(p) + l + 1) != allocl) {
688 DBG("%s: p: %d, l: %d, a: %d\n", 719 DBG("%s: p: %d, l: %d, a: %d\n",
689 pathp, strlen(p), l, allocl); 720 pathp, (int)strlen(p), l, allocl);
690 } 721 }
691#endif 722#endif
692 p += strlen(p); 723 p += strlen(p);
@@ -854,35 +885,73 @@ void __init unflatten_device_tree(void)
854 DBG(" <- unflatten_device_tree()\n"); 885 DBG(" <- unflatten_device_tree()\n");
855} 886}
856 887
857
858static int __init early_init_dt_scan_cpus(unsigned long node, 888static int __init early_init_dt_scan_cpus(unsigned long node,
859 const char *uname, int depth, void *data) 889 const char *uname, int depth,
890 void *data)
860{ 891{
892 static int logical_cpuid = 0;
893 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
894#ifdef CONFIG_ALTIVEC
861 u32 *prop; 895 u32 *prop;
862 unsigned long size; 896#endif
863 char *type = of_get_flat_dt_prop(node, "device_type", &size); 897 u32 *intserv;
898 int i, nthreads;
899 unsigned long len;
900 int found = 0;
864 901
865 /* We are scanning "cpu" nodes only */ 902 /* We are scanning "cpu" nodes only */
866 if (type == NULL || strcmp(type, "cpu") != 0) 903 if (type == NULL || strcmp(type, "cpu") != 0)
867 return 0; 904 return 0;
868 905
869 boot_cpuid = 0; 906 /* Get physical cpuid */
870 boot_cpuid_phys = 0; 907 intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
871 if (initial_boot_params && initial_boot_params->version >= 2) { 908 if (intserv) {
872 /* version 2 of the kexec param format adds the phys cpuid 909 nthreads = len / sizeof(int);
873 * of booted proc.
874 */
875 boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
876 } else { 910 } else {
877 /* Check if it's the boot-cpu, set it's hw index now */ 911 intserv = of_get_flat_dt_prop(node, "reg", NULL);
878 if (of_get_flat_dt_prop(node, 912 nthreads = 1;
913 }
914
915 /*
916 * Now see if any of these threads match our boot cpu.
917 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
918 */
919 for (i = 0; i < nthreads; i++) {
920 /*
921 * version 2 of the kexec param format adds the phys cpuid of
922 * booted proc.
923 */
924 if (initial_boot_params && initial_boot_params->version >= 2) {
925 if (intserv[i] ==
926 initial_boot_params->boot_cpuid_phys) {
927 found = 1;
928 break;
929 }
930 } else {
931 /*
 932	 * Check if it's the boot-cpu and set its hw index now;
933 * unfortunately this format did not support booting
934 * off secondary threads.
935 */
936 if (of_get_flat_dt_prop(node,
879 "linux,boot-cpu", NULL) != NULL) { 937 "linux,boot-cpu", NULL) != NULL) {
880 prop = of_get_flat_dt_prop(node, "reg", NULL); 938 found = 1;
881 if (prop != NULL) 939 break;
882 boot_cpuid_phys = *prop; 940 }
883 } 941 }
942
943#ifdef CONFIG_SMP
944 /* logical cpu id is always 0 on UP kernels */
945 logical_cpuid++;
946#endif
947 }
948
949 if (found) {
950 DBG("boot cpu: logical %d physical %d\n", logical_cpuid,
951 intserv[i]);
952 boot_cpuid = logical_cpuid;
953 set_hard_smp_processor_id(boot_cpuid, intserv[i]);
884 } 954 }
885 set_hard_smp_processor_id(0, boot_cpuid_phys);
886 955
887#ifdef CONFIG_ALTIVEC 956#ifdef CONFIG_ALTIVEC
888 /* Check if we have a VMX and eventually update CPU features */ 957 /* Check if we have a VMX and eventually update CPU features */
@@ -901,16 +970,10 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
901#endif /* CONFIG_ALTIVEC */ 970#endif /* CONFIG_ALTIVEC */
902 971
903#ifdef CONFIG_PPC_PSERIES 972#ifdef CONFIG_PPC_PSERIES
904 /* 973 if (nthreads > 1)
905 * Check for an SMT capable CPU and set the CPU feature. We do
906 * this by looking at the size of the ibm,ppc-interrupt-server#s
907 * property
908 */
909 prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
910 &size);
911 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
912 if (prop && ((size / sizeof(u32)) > 1))
913 cur_cpu_spec->cpu_features |= CPU_FTR_SMT; 974 cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
975 else
976 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
914#endif 977#endif
915 978
916 return 0; 979 return 0;
@@ -919,7 +982,6 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
919static int __init early_init_dt_scan_chosen(unsigned long node, 982static int __init early_init_dt_scan_chosen(unsigned long node,
920 const char *uname, int depth, void *data) 983 const char *uname, int depth, void *data)
921{ 984{
922 u32 *prop;
923 unsigned long *lprop; 985 unsigned long *lprop;
924 unsigned long l; 986 unsigned long l;
925 char *p; 987 char *p;
@@ -930,14 +992,6 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
930 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) 992 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
931 return 0; 993 return 0;
932 994
933 /* get platform type */
934 prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
935 if (prop == NULL)
936 return 0;
937#ifdef CONFIG_PPC_MULTIPLATFORM
938 _machine = *prop;
939#endif
940
941#ifdef CONFIG_PPC64 995#ifdef CONFIG_PPC64
942 /* check if iommu is forced on or off */ 996 /* check if iommu is forced on or off */
943 if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL) 997 if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
@@ -964,15 +1018,15 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
964 * set of RTAS infos now if available 1018 * set of RTAS infos now if available
965 */ 1019 */
966 { 1020 {
967 u64 *basep, *entryp; 1021 u64 *basep, *entryp, *sizep;
968 1022
969 basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL); 1023 basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
970 entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL); 1024 entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
971 prop = of_get_flat_dt_prop(node, "linux,rtas-size", NULL); 1025 sizep = of_get_flat_dt_prop(node, "linux,rtas-size", NULL);
972 if (basep && entryp && prop) { 1026 if (basep && entryp && sizep) {
973 rtas.base = *basep; 1027 rtas.base = *basep;
974 rtas.entry = *entryp; 1028 rtas.entry = *entryp;
975 rtas.size = *prop; 1029 rtas.size = *sizep;
976 } 1030 }
977 } 1031 }
978#endif /* CONFIG_PPC_RTAS */ 1032#endif /* CONFIG_PPC_RTAS */
@@ -1001,25 +1055,13 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
1001 1055
1002 if (strstr(cmd_line, "mem=")) { 1056 if (strstr(cmd_line, "mem=")) {
1003 char *p, *q; 1057 char *p, *q;
1004 unsigned long maxmem = 0;
1005 1058
1006 for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) { 1059 for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
1007 q = p + 4; 1060 q = p + 4;
1008 if (p > cmd_line && p[-1] != ' ') 1061 if (p > cmd_line && p[-1] != ' ')
1009 continue; 1062 continue;
1010 maxmem = simple_strtoul(q, &q, 0); 1063 memory_limit = memparse(q, &q);
1011 if (*q == 'k' || *q == 'K') {
1012 maxmem <<= 10;
1013 ++q;
1014 } else if (*q == 'm' || *q == 'M') {
1015 maxmem <<= 20;
1016 ++q;
1017 } else if (*q == 'g' || *q == 'G') {
1018 maxmem <<= 30;
1019 ++q;
1020 }
1021 } 1064 }
1022 memory_limit = maxmem;
1023 } 1065 }
1024 1066
1025 /* break now */ 1067 /* break now */
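
memparse() absorbs the hand-rolled suffix handling deleted above: it does a simple_strtoull() and then shifts by 10 for each step of the K/M/G suffix. Roughly (a simplified sketch; the real helper lives in lib/cmdline.c):

	/* Simplified sketch of memparse(); the fall-through is
	 * intentional so 'G' shifts three times, 'M' twice, 'K' once. */
	unsigned long long sketch_memparse(char *ptr, char **retptr)
	{
		unsigned long long ret = simple_strtoull(ptr, retptr, 0);

		switch (**retptr) {
		case 'G': case 'g':
			ret <<= 10;	/* fall through */
		case 'M': case 'm':
			ret <<= 10;	/* fall through */
		case 'K': case 'k':
			ret <<= 10;
			(*retptr)++;	/* consume the suffix */
		}
		return ret;
	}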
@@ -1755,7 +1797,7 @@ static int of_finish_dynamic_node(struct device_node *node)
1755 /* We don't support that function on PowerMac, at least 1797 /* We don't support that function on PowerMac, at least
1756 * not yet 1798 * not yet
1757 */ 1799 */
1758 if (_machine == PLATFORM_POWERMAC) 1800 if (machine_is(powermac))
1759 return -ENODEV; 1801 return -ENODEV;
1760 1802
1761 /* fix up new node's linux_phandle field */ 1803 /* fix up new node's linux_phandle field */
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 813c2cd194c2..d66c5e77fcff 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -180,6 +180,16 @@ static unsigned long __initdata prom_tce_alloc_start;
180static unsigned long __initdata prom_tce_alloc_end; 180static unsigned long __initdata prom_tce_alloc_end;
181#endif 181#endif
182 182
 183/* Platform codes are now obsolete in the kernel. They are only used within
 184 * this file, and will ultimately be gone too. Feel free to change them if you
 185 * need to; they are not shared with anything outside of this file anymore.
186 */
187#define PLATFORM_PSERIES 0x0100
188#define PLATFORM_PSERIES_LPAR 0x0101
189#define PLATFORM_LPAR 0x0001
190#define PLATFORM_POWERMAC 0x0400
191#define PLATFORM_GENERIC 0x0500
192
183static int __initdata of_platform; 193static int __initdata of_platform;
184 194
185static char __initdata prom_cmd_line[COMMAND_LINE_SIZE]; 195static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
@@ -397,6 +407,11 @@ static void __init __attribute__((noreturn)) prom_panic(const char *reason)
397 reason = PTRRELOC(reason); 407 reason = PTRRELOC(reason);
398#endif 408#endif
399 prom_print(reason); 409 prom_print(reason);
 410	/* Do not call exit because it clears the screen on pmac;
 411	 * it also causes some sort of double-fault on early pmacs */
412 if (RELOC(of_platform) == PLATFORM_POWERMAC)
413 asm("trap\n");
414
400 /* ToDo: should put up an SRC here on p/iSeries */ 415 /* ToDo: should put up an SRC here on p/iSeries */
401 call_prom("exit", 0, 0); 416 call_prom("exit", 0, 0);
402 417
@@ -1487,7 +1502,10 @@ static int __init prom_find_machine_type(void)
1487 int len, i = 0; 1502 int len, i = 0;
1488#ifdef CONFIG_PPC64 1503#ifdef CONFIG_PPC64
1489 phandle rtas; 1504 phandle rtas;
1505 int x;
1490#endif 1506#endif
1507
1508 /* Look for a PowerMac */
1491 len = prom_getprop(_prom->root, "compatible", 1509 len = prom_getprop(_prom->root, "compatible",
1492 compat, sizeof(compat)-1); 1510 compat, sizeof(compat)-1);
1493 if (len > 0) { 1511 if (len > 0) {
@@ -1500,28 +1518,36 @@ static int __init prom_find_machine_type(void)
1500 if (strstr(p, RELOC("Power Macintosh")) || 1518 if (strstr(p, RELOC("Power Macintosh")) ||
1501 strstr(p, RELOC("MacRISC"))) 1519 strstr(p, RELOC("MacRISC")))
1502 return PLATFORM_POWERMAC; 1520 return PLATFORM_POWERMAC;
1503#ifdef CONFIG_PPC64
1504 if (strstr(p, RELOC("Momentum,Maple")))
1505 return PLATFORM_MAPLE;
1506 if (strstr(p, RELOC("IBM,CPB")))
1507 return PLATFORM_CELL;
1508#endif
1509 i += sl + 1; 1521 i += sl + 1;
1510 } 1522 }
1511 } 1523 }
1512#ifdef CONFIG_PPC64 1524#ifdef CONFIG_PPC64
1525 /* If not a mac, try to figure out if it's an IBM pSeries or any other
 1526	 * PAPR-compliant platform. We assume it is if:
 1527	 *  - /device_type is "chrp" (please, do NOT use that for future
 1528	 *    non-IBM designs!)
1529 * - it has /rtas
1530 */
1531 len = prom_getprop(_prom->root, "model",
1532 compat, sizeof(compat)-1);
1533 if (len <= 0)
1534 return PLATFORM_GENERIC;
1535 compat[len] = 0;
1536 if (strcmp(compat, "chrp"))
1537 return PLATFORM_GENERIC;
1538
1513 /* Default to pSeries. We need to know if we are running LPAR */ 1539 /* Default to pSeries. We need to know if we are running LPAR */
1514 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas")); 1540 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1515 if (PHANDLE_VALID(rtas)) { 1541 if (!PHANDLE_VALID(rtas))
1516 int x = prom_getproplen(rtas, "ibm,hypertas-functions"); 1542 return PLATFORM_GENERIC;
1517 if (x != PROM_ERROR) { 1543 x = prom_getproplen(rtas, "ibm,hypertas-functions");
1518 prom_printf("Hypertas detected, assuming LPAR !\n"); 1544 if (x != PROM_ERROR) {
1519 return PLATFORM_PSERIES_LPAR; 1545 prom_printf("Hypertas detected, assuming LPAR !\n");
1520 } 1546 return PLATFORM_PSERIES_LPAR;
1521 } 1547 }
1522 return PLATFORM_PSERIES; 1548 return PLATFORM_PSERIES;
1523#else 1549#else
1524 return PLATFORM_CHRP; 1550 return PLATFORM_GENERIC;
1525#endif 1551#endif
1526} 1552}
1527 1553
@@ -2029,7 +2055,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2029{ 2055{
2030 struct prom_t *_prom; 2056 struct prom_t *_prom;
2031 unsigned long hdr; 2057 unsigned long hdr;
2032 u32 getprop_rval;
2033 unsigned long offset = reloc_offset(); 2058 unsigned long offset = reloc_offset();
2034 2059
2035#ifdef CONFIG_PPC32 2060#ifdef CONFIG_PPC32
@@ -2060,6 +2085,12 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2060 */ 2085 */
2061 prom_init_stdout(); 2086 prom_init_stdout();
2062 2087
2088 /*
2089 * Get default machine type. At this point, we do not differentiate
2090 * between pSeries SMP and pSeries LPAR
2091 */
2092 RELOC(of_platform) = prom_find_machine_type();
2093
2063 /* Bail if this is a kdump kernel. */ 2094 /* Bail if this is a kdump kernel. */
2064 if (PHYSICAL_START > 0) 2095 if (PHYSICAL_START > 0)
2065 prom_panic("Error: You can't boot a kdump kernel from OF!\n"); 2096 prom_panic("Error: You can't boot a kdump kernel from OF!\n");
@@ -2069,15 +2100,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2069 */ 2100 */
2070 prom_check_initrd(r3, r4); 2101 prom_check_initrd(r3, r4);
2071 2102
2072 /*
2073 * Get default machine type. At this point, we do not differentiate
2074 * between pSeries SMP and pSeries LPAR
2075 */
2076 RELOC(of_platform) = prom_find_machine_type();
2077 getprop_rval = RELOC(of_platform);
2078 prom_setprop(_prom->chosen, "/chosen", "linux,platform",
2079 &getprop_rval, sizeof(getprop_rval));
2080
2081#ifdef CONFIG_PPC_PSERIES 2103#ifdef CONFIG_PPC_PSERIES
2082 /* 2104 /*
2083 * On pSeries, inform the firmware about our capabilities 2105 * On pSeries, inform the firmware about our capabilities
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
index 1f03fb28cc0a..456286cf1d14 100644
--- a/arch/powerpc/kernel/rtas-proc.c
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -257,7 +257,7 @@ static int __init proc_rtas_init(void)
257{ 257{
258 struct proc_dir_entry *entry; 258 struct proc_dir_entry *entry;
259 259
260 if (_machine != PLATFORM_PSERIES && _machine != PLATFORM_PSERIES_LPAR) 260 if (!machine_is(pseries))
261 return 1; 261 return 1;
262 262
263 rtas_node = of_find_node_by_name(NULL, "rtas"); 263 rtas_node = of_find_node_by_name(NULL, "rtas");
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index b5b2add7ad1e..06636c927a7e 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -25,6 +25,7 @@
25#include <asm/hvcall.h> 25#include <asm/hvcall.h>
26#include <asm/semaphore.h> 26#include <asm/semaphore.h>
27#include <asm/machdep.h> 27#include <asm/machdep.h>
28#include <asm/firmware.h>
28#include <asm/page.h> 29#include <asm/page.h>
29#include <asm/param.h> 30#include <asm/param.h>
30#include <asm/system.h> 31#include <asm/system.h>
@@ -32,6 +33,7 @@
32#include <asm/uaccess.h> 33#include <asm/uaccess.h>
33#include <asm/lmb.h> 34#include <asm/lmb.h>
34#include <asm/udbg.h> 35#include <asm/udbg.h>
36#include <asm/syscalls.h>
35 37
36struct rtas_t rtas = { 38struct rtas_t rtas = {
37 .lock = SPIN_LOCK_UNLOCKED 39 .lock = SPIN_LOCK_UNLOCKED
@@ -591,7 +593,7 @@ static void rtas_percpu_suspend_me(void *info)
591 data->waiting = 0; 593 data->waiting = 0;
592 data->args->args[data->args->nargs] = 594 data->args->args[data->args->nargs] =
593 rtas_call(ibm_suspend_me_token, 0, 1, NULL); 595 rtas_call(ibm_suspend_me_token, 0, 1, NULL);
594 for_each_cpu(i) 596 for_each_possible_cpu(i)
595 plpar_hcall_norets(H_PROD,i); 597 plpar_hcall_norets(H_PROD,i);
596 } else { 598 } else {
597 data->waiting = -EBUSY; 599 data->waiting = -EBUSY;
@@ -624,7 +626,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args)
624 /* Prod each CPU. This won't hurt, and will wake 626 /* Prod each CPU. This won't hurt, and will wake
625 * anyone we successfully put to sleep with H_Join 627 * anyone we successfully put to sleep with H_Join
626 */ 628 */
627 for_each_cpu(i) 629 for_each_possible_cpu(i)
628 plpar_hcall_norets(H_PROD, i); 630 plpar_hcall_norets(H_PROD, i);
629 631
630 return data.waiting; 632 return data.waiting;
@@ -767,7 +769,7 @@ void __init rtas_initialize(void)
767 * the stop-self token if any 769 * the stop-self token if any
768 */ 770 */
769#ifdef CONFIG_PPC64 771#ifdef CONFIG_PPC64
770 if (_machine == PLATFORM_PSERIES_LPAR) { 772 if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) {
771 rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX); 773 rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX);
772 ibm_suspend_me_token = rtas_token("ibm,suspend-me"); 774 ibm_suspend_me_token = rtas_token("ibm,suspend-me");
773 } 775 }
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index c1d62bf11f29..c607f3b9ca17 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -9,6 +9,9 @@
9 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 */ 11 */
12
13#undef DEBUG
14
12#include <linux/config.h> 15#include <linux/config.h>
13#include <linux/module.h> 16#include <linux/module.h>
14#include <linux/string.h> 17#include <linux/string.h>
@@ -41,6 +44,7 @@
41#include <asm/time.h> 44#include <asm/time.h>
42#include <asm/cputable.h> 45#include <asm/cputable.h>
43#include <asm/sections.h> 46#include <asm/sections.h>
47#include <asm/firmware.h>
44#include <asm/btext.h> 48#include <asm/btext.h>
45#include <asm/nvram.h> 49#include <asm/nvram.h>
46#include <asm/setup.h> 50#include <asm/setup.h>
@@ -56,8 +60,6 @@
56 60
57#include "setup.h" 61#include "setup.h"
58 62
59#undef DEBUG
60
61#ifdef DEBUG 63#ifdef DEBUG
62#include <asm/udbg.h> 64#include <asm/udbg.h>
63#define DBG(fmt...) udbg_printf(fmt) 65#define DBG(fmt...) udbg_printf(fmt)
@@ -65,10 +67,12 @@
65#define DBG(fmt...) 67#define DBG(fmt...)
66#endif 68#endif
67 69
68#ifdef CONFIG_PPC_MULTIPLATFORM 70/* The main machine-dep calls structure
69int _machine = 0; 71 */
70EXPORT_SYMBOL(_machine); 72struct machdep_calls ppc_md;
71#endif 73EXPORT_SYMBOL(ppc_md);
74struct machdep_calls *machine_id;
75EXPORT_SYMBOL(machine_id);
72 76
73unsigned long klimit = (unsigned long) _end; 77unsigned long klimit = (unsigned long) _end;
74 78
@@ -168,7 +172,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
168 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); 172 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
169#endif /* CONFIG_SMP && CONFIG_PPC32 */ 173#endif /* CONFIG_SMP && CONFIG_PPC32 */
170 seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq); 174 seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
171 175 if (ppc_md.name)
176 seq_printf(m, "platform\t: %s\n", ppc_md.name);
172 if (ppc_md.show_cpuinfo != NULL) 177 if (ppc_md.show_cpuinfo != NULL)
173 ppc_md.show_cpuinfo(m); 178 ppc_md.show_cpuinfo(m);
174 179
@@ -352,12 +357,13 @@ void __init check_for_initrd(void)
352 * must be called before using this. 357 * must be called before using this.
353 * 358 *
354 * While we're here, we may as well set the "physical" cpu ids in the paca. 359 * While we're here, we may as well set the "physical" cpu ids in the paca.
360 *
361 * NOTE: This must match the parsing done in early_init_dt_scan_cpus.
355 */ 362 */
356void __init smp_setup_cpu_maps(void) 363void __init smp_setup_cpu_maps(void)
357{ 364{
358 struct device_node *dn = NULL; 365 struct device_node *dn = NULL;
359 int cpu = 0; 366 int cpu = 0;
360 int swap_cpuid = 0;
361 367
362 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) { 368 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
363 int *intserv; 369 int *intserv;
@@ -376,30 +382,17 @@ void __init smp_setup_cpu_maps(void)
376 for (j = 0; j < nthreads && cpu < NR_CPUS; j++) { 382 for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
377 cpu_set(cpu, cpu_present_map); 383 cpu_set(cpu, cpu_present_map);
378 set_hard_smp_processor_id(cpu, intserv[j]); 384 set_hard_smp_processor_id(cpu, intserv[j]);
379
380 if (intserv[j] == boot_cpuid_phys)
381 swap_cpuid = cpu;
382 cpu_set(cpu, cpu_possible_map); 385 cpu_set(cpu, cpu_possible_map);
383 cpu++; 386 cpu++;
384 } 387 }
385 } 388 }
386 389
387 /* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
388 * boot cpu is logical 0.
389 */
390 if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
391 u32 tmp;
392 tmp = get_hard_smp_processor_id(0);
393 set_hard_smp_processor_id(0, boot_cpuid_phys);
394 set_hard_smp_processor_id(swap_cpuid, tmp);
395 }
396
397#ifdef CONFIG_PPC64 390#ifdef CONFIG_PPC64
398 /* 391 /*
399 * On pSeries LPAR, we need to know how many cpus 392 * On pSeries LPAR, we need to know how many cpus
400 * could possibly be added to this partition. 393 * could possibly be added to this partition.
401 */ 394 */
402 if (_machine == PLATFORM_PSERIES_LPAR && 395 if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
403 (dn = of_find_node_by_path("/rtas"))) { 396 (dn = of_find_node_by_path("/rtas"))) {
404 int num_addr_cell, num_size_cell, maxcpus; 397 int num_addr_cell, num_size_cell, maxcpus;
405 unsigned int *ireg; 398 unsigned int *ireg;
@@ -438,7 +431,7 @@ void __init smp_setup_cpu_maps(void)
438 /* 431 /*
439 * Do the sibling map; assume only two threads per processor. 432 * Do the sibling map; assume only two threads per processor.
440 */ 433 */
441 for_each_cpu(cpu) { 434 for_each_possible_cpu(cpu) {
442 cpu_set(cpu, cpu_sibling_map[cpu]); 435 cpu_set(cpu, cpu_sibling_map[cpu]);
443 if (cpu_has_feature(CPU_FTR_SMT)) 436 if (cpu_has_feature(CPU_FTR_SMT))
444 cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]); 437 cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
@@ -468,3 +461,34 @@ static int __init early_xmon(char *p)
468} 461}
469early_param("xmon", early_xmon); 462early_param("xmon", early_xmon);
470#endif 463#endif
464
465void probe_machine(void)
466{
467 extern struct machdep_calls __machine_desc_start;
468 extern struct machdep_calls __machine_desc_end;
469
470 /*
471 * Iterate all ppc_md structures until we find the proper
472 * one for the current machine type
473 */
474 DBG("Probing machine type ...\n");
475
476 for (machine_id = &__machine_desc_start;
477 machine_id < &__machine_desc_end;
478 machine_id++) {
479 DBG(" %s ...", machine_id->name);
480 memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls));
481 if (ppc_md.probe()) {
482 DBG(" match !\n");
483 break;
484 }
485 DBG("\n");
486 }
 487	/* What can we do if we didn't find one? */
488 if (machine_id >= &__machine_desc_end) {
489 DBG("No suitable machine found !\n");
490 for (;;);
491 }
492
493 printk(KERN_INFO "Using %s machine description\n", ppc_md.name);
494}
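
probe_machine() assumes every platform has dropped a struct machdep_calls into the __machine_desc link section; a platform then looks roughly like the sketch below. The define_machine() helper is assumed from the matching machdep.h change, which is not shown in this diff, and "example"/"example,board" are placeholder names:

	/* Sketch of a platform registering itself for probe_machine();
	 * names are placeholders. */
	static int __init example_probe(void)
	{
		unsigned long root = of_get_flat_dt_root();

		return of_flat_dt_is_compatible(root, "example,board");
	}

	define_machine(example) {
		.name		= "Example Board",
		.probe		= example_probe,
		/* .setup_arch, .init_IRQ, ... */
	};

With that in place, machine_id points at the matching descriptor after boot, and the machine_is(example) test used throughout this patch reduces to a pointer comparison.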
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index dc2770df25b3..a72bf5dceeee 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -53,9 +53,6 @@
53extern void platform_init(void); 53extern void platform_init(void);
54extern void bootx_init(unsigned long r4, unsigned long phys); 54extern void bootx_init(unsigned long r4, unsigned long phys);
55 55
56extern void ppc6xx_idle(void);
57extern void power4_idle(void);
58
59boot_infos_t *boot_infos; 56boot_infos_t *boot_infos;
60struct ide_machdep_calls ppc_ide_md; 57struct ide_machdep_calls ppc_ide_md;
61 58
@@ -70,10 +67,6 @@ unsigned int DMA_MODE_WRITE;
70int have_of = 1; 67int have_of = 1;
71 68
72#ifdef CONFIG_PPC_MULTIPLATFORM 69#ifdef CONFIG_PPC_MULTIPLATFORM
73extern void prep_init(void);
74extern void pmac_init(void);
75extern void chrp_init(void);
76
77dev_t boot_dev; 70dev_t boot_dev;
78#endif /* CONFIG_PPC_MULTIPLATFORM */ 71#endif /* CONFIG_PPC_MULTIPLATFORM */
79 72
@@ -85,9 +78,6 @@ unsigned long SYSRQ_KEY = 0x54;
85unsigned long vgacon_remap_base; 78unsigned long vgacon_remap_base;
86#endif 79#endif
87 80
88struct machdep_calls ppc_md;
89EXPORT_SYMBOL(ppc_md);
90
91/* 81/*
92 * These are used in binfmt_elf.c to put aux entries on the stack 82 * These are used in binfmt_elf.c to put aux entries on the stack
93 * for each elf executable being started. 83 * for each elf executable being started.
@@ -111,7 +101,7 @@ unsigned long __init early_init(unsigned long dt_ptr)
111 101
112 /* First zero the BSS -- use memset_io, some platforms don't have 102 /* First zero the BSS -- use memset_io, some platforms don't have
113 * caches on yet */ 103 * caches on yet */
114 memset_io(PTRRELOC(&__bss_start), 0, _end - __bss_start); 104 memset_io((void __iomem *)PTRRELOC(&__bss_start), 0, _end - __bss_start);
115 105
116 /* 106 /*
117 * Identify the CPU type and fix up code sections 107 * Identify the CPU type and fix up code sections
@@ -123,48 +113,6 @@ unsigned long __init early_init(unsigned long dt_ptr)
123 return KERNELBASE + offset; 113 return KERNELBASE + offset;
124} 114}
125 115
126#ifdef CONFIG_PPC_MULTIPLATFORM
127/*
128 * The PPC_MULTIPLATFORM version of platform_init...
129 */
130void __init platform_init(void)
131{
132 /* if we didn't get any bootinfo telling us what we are... */
133 if (_machine == 0) {
134 /* prep boot loader tells us if we're prep or not */
135 if ( *(unsigned long *)(KERNELBASE) == (0xdeadc0de) )
136 _machine = _MACH_prep;
137 }
138
139#ifdef CONFIG_PPC_PREP
140 /* not much more to do here, if prep */
141 if (_machine == _MACH_prep) {
142 prep_init();
143 return;
144 }
145#endif
146
147#ifdef CONFIG_ADB
148 if (strstr(cmd_line, "adb_sync")) {
149 extern int __adb_probe_sync;
150 __adb_probe_sync = 1;
151 }
152#endif /* CONFIG_ADB */
153
154 switch (_machine) {
155#ifdef CONFIG_PPC_PMAC
156 case _MACH_Pmac:
157 pmac_init();
158 break;
159#endif
160#ifdef CONFIG_PPC_CHRP
161 case _MACH_chrp:
162 chrp_init();
163 break;
164#endif
165 }
166}
167#endif
168 116
169/* 117/*
170 * Find out what kind of machine we're on and save any data we need 118 * Find out what kind of machine we're on and save any data we need
@@ -190,11 +138,17 @@ void __init machine_init(unsigned long dt_ptr, unsigned long phys)
190 strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line)); 138 strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
191#endif /* CONFIG_CMDLINE */ 139#endif /* CONFIG_CMDLINE */
192 140
193 /* Base init based on machine type */ 141#ifdef CONFIG_PPC_MULTIPLATFORM
142 probe_machine();
143#else
 144	/* Base init based on machine type. Obsolete, please kill! */
194 platform_init(); 145 platform_init();
146#endif
195 147
196#ifdef CONFIG_6xx 148#ifdef CONFIG_6xx
197 ppc_md.power_save = ppc6xx_idle; 149 if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
150 cpu_has_feature(CPU_FTR_CAN_NAP))
151 ppc_md.power_save = ppc6xx_idle;
198#endif 152#endif
199 153
200 if (ppc_md.progress) 154 if (ppc_md.progress)
@@ -272,7 +226,7 @@ int __init ppc_init(void)
272 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); 226 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
273 227
274 /* register CPU devices */ 228 /* register CPU devices */
275 for_each_cpu(i) 229 for_each_possible_cpu(i)
276 register_cpu(&cpu_devices[i], i, NULL); 230 register_cpu(&cpu_devices[i], i, NULL);
277 231
278 /* call platform init */ 232 /* call platform init */
@@ -352,12 +306,6 @@ void __init setup_arch(char **cmdline_p)
352 do_init_bootmem(); 306 do_init_bootmem();
353 if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab); 307 if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab);
354 308
355#ifdef CONFIG_PPC_OCP
356 /* Initialize OCP device list */
357 ocp_early_init();
358 if ( ppc_md.progress ) ppc_md.progress("ocp: exit", 0x3eab);
359#endif
360
361#ifdef CONFIG_DUMMY_CONSOLE 309#ifdef CONFIG_DUMMY_CONSOLE
362 conswitchp = &dummy_con; 310 conswitchp = &dummy_con;
363#endif 311#endif
@@ -366,7 +314,4 @@ void __init setup_arch(char **cmdline_p)
366 if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab); 314 if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
367 315
368 paging_init(); 316 paging_init();
369
370 /* this is for modules since _machine can be a define -- Cort */
371 ppc_md.ppc_machine = _machine;
372} 317}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index e20c1fae3423..59aa92cd6fa4 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -73,7 +73,6 @@
73 73
74int have_of = 1; 74int have_of = 1;
75int boot_cpuid = 0; 75int boot_cpuid = 0;
76int boot_cpuid_phys = 0;
77dev_t boot_dev; 76dev_t boot_dev;
78u64 ppc64_pft_size; 77u64 ppc64_pft_size;
79 78
@@ -96,11 +95,6 @@ int dcache_bsize;
96int icache_bsize; 95int icache_bsize;
97int ucache_bsize; 96int ucache_bsize;
98 97
99/* The main machine-dep calls structure
100 */
101struct machdep_calls ppc_md;
102EXPORT_SYMBOL(ppc_md);
103
104#ifdef CONFIG_MAGIC_SYSRQ 98#ifdef CONFIG_MAGIC_SYSRQ
105unsigned long SYSRQ_KEY; 99unsigned long SYSRQ_KEY;
106#endif /* CONFIG_MAGIC_SYSRQ */ 100#endif /* CONFIG_MAGIC_SYSRQ */
@@ -161,32 +155,6 @@ early_param("smt-enabled", early_smt_enabled);
161#define check_smt_enabled() 155#define check_smt_enabled()
162#endif /* CONFIG_SMP */ 156#endif /* CONFIG_SMP */
163 157
164extern struct machdep_calls pSeries_md;
165extern struct machdep_calls pmac_md;
166extern struct machdep_calls maple_md;
167extern struct machdep_calls cell_md;
168extern struct machdep_calls iseries_md;
169
170/* Ultimately, stuff them in an elf section like initcalls... */
171static struct machdep_calls __initdata *machines[] = {
172#ifdef CONFIG_PPC_PSERIES
173 &pSeries_md,
174#endif /* CONFIG_PPC_PSERIES */
175#ifdef CONFIG_PPC_PMAC
176 &pmac_md,
177#endif /* CONFIG_PPC_PMAC */
178#ifdef CONFIG_PPC_MAPLE
179 &maple_md,
180#endif /* CONFIG_PPC_MAPLE */
181#ifdef CONFIG_PPC_CELL
182 &cell_md,
183#endif
184#ifdef CONFIG_PPC_ISERIES
185 &iseries_md,
186#endif
187 NULL
188};
189
190/* 158/*
191 * Early initialization entry point. This is called by head.S 159 * Early initialization entry point. This is called by head.S
192 * with MMU translation disabled. We rely on the "feature" of 160 * with MMU translation disabled. We rely on the "feature" of
@@ -208,13 +176,10 @@ static struct machdep_calls __initdata *machines[] = {
208 176
209void __init early_setup(unsigned long dt_ptr) 177void __init early_setup(unsigned long dt_ptr)
210{ 178{
211 struct paca_struct *lpaca = get_paca();
212 static struct machdep_calls **mach;
213
214 /* Enable early debugging if any specified (see udbg.h) */ 179 /* Enable early debugging if any specified (see udbg.h) */
215 udbg_early_init(); 180 udbg_early_init();
216 181
217 DBG(" -> early_setup()\n"); 182 DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
218 183
219 /* 184 /*
220 * Do early initializations using the flattened device 185 * Do early initializations using the flattened device
@@ -223,22 +188,16 @@ void __init early_setup(unsigned long dt_ptr)
223 */ 188 */
224 early_init_devtree(__va(dt_ptr)); 189 early_init_devtree(__va(dt_ptr));
225 190
226 /* 191 /* Now we know the logical id of our boot cpu, setup the paca. */
227 * Iterate all ppc_md structures until we find the proper 192 setup_boot_paca();
228 * one for the current machine type
229 */
230 DBG("Probing machine type for platform %x...\n", _machine);
231 193
232 for (mach = machines; *mach; mach++) { 194 /* Fix up paca fields required for the boot cpu */
233 if ((*mach)->probe(_machine)) 195 get_paca()->cpu_start = 1;
234 break; 196 get_paca()->stab_real = __pa((u64)&initial_stab);
235 } 197 get_paca()->stab_addr = (u64)&initial_stab;
236 /* What can we do if we didn't find ? */ 198
237 if (*mach == NULL) { 199 /* Probe the machine type */
238 DBG("No suitable machine found !\n"); 200 probe_machine();
239 for (;;);
240 }
241 ppc_md = **mach;
242 201
243#ifdef CONFIG_CRASH_DUMP 202#ifdef CONFIG_CRASH_DUMP
244 kdump_setup(); 203 kdump_setup();
@@ -260,7 +219,7 @@ void __init early_setup(unsigned long dt_ptr)
260 if (cpu_has_feature(CPU_FTR_SLB)) 219 if (cpu_has_feature(CPU_FTR_SLB))
261 slb_initialize(); 220 slb_initialize();
262 else 221 else
263 stab_initialize(lpaca->stab_real); 222 stab_initialize(get_paca()->stab_real);
264 } 223 }
265 224
266 DBG(" <- early_setup()\n"); 225 DBG(" <- early_setup()\n");
@@ -340,7 +299,7 @@ static void __init initialize_cache_info(void)
340 const char *dc, *ic; 299 const char *dc, *ic;
341 300
342 /* Then read cache informations */ 301 /* Then read cache informations */
343 if (_machine == PLATFORM_POWERMAC) { 302 if (machine_is(powermac)) {
344 dc = "d-cache-block-size"; 303 dc = "d-cache-block-size";
345 ic = "i-cache-block-size"; 304 ic = "i-cache-block-size";
346 } else { 305 } else {
@@ -484,7 +443,6 @@ void __init setup_system(void)
484 printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size); 443 printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
485 printk("ppc64_interrupt_controller = 0x%ld\n", 444 printk("ppc64_interrupt_controller = 0x%ld\n",
486 ppc64_interrupt_controller); 445 ppc64_interrupt_controller);
487 printk("platform = 0x%x\n", _machine);
488 printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size()); 446 printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size());
489 printk("ppc64_caches.dcache_line_size = 0x%x\n", 447 printk("ppc64_caches.dcache_line_size = 0x%x\n",
490 ppc64_caches.dline_size); 448 ppc64_caches.dline_size);
@@ -516,7 +474,7 @@ static void __init irqstack_early_init(void)
516 * interrupt stacks must be under 256MB, we cannot afford to take 474 * interrupt stacks must be under 256MB, we cannot afford to take
517 * SLB misses on them. 475 * SLB misses on them.
518 */ 476 */
519 for_each_cpu(i) { 477 for_each_possible_cpu(i) {
520 softirq_ctx[i] = (struct thread_info *) 478 softirq_ctx[i] = (struct thread_info *)
521 __va(lmb_alloc_base(THREAD_SIZE, 479 __va(lmb_alloc_base(THREAD_SIZE,
522 THREAD_SIZE, 0x10000000)); 480 THREAD_SIZE, 0x10000000));
@@ -549,7 +507,7 @@ static void __init emergency_stack_init(void)
549 */ 507 */
550 limit = min(0x10000000UL, lmb.rmo_size); 508 limit = min(0x10000000UL, lmb.rmo_size);
551 509
552 for_each_cpu(i) 510 for_each_possible_cpu(i)
553 paca[i].emergency_sp = 511 paca[i].emergency_sp =
554 __va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE; 512 __va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE;
555} 513}
@@ -602,12 +560,6 @@ void __init setup_arch(char **cmdline_p)
602 560
603 ppc_md.setup_arch(); 561 ppc_md.setup_arch();
604 562
605 /* Use the default idle loop if the platform hasn't provided one. */
606 if (NULL == ppc_md.idle_loop) {
607 ppc_md.idle_loop = default_idle;
608 printk(KERN_INFO "Using default idle loop\n");
609 }
610
611 paging_init(); 563 paging_init();
612 ppc64_boot_msg(0x15, "Setup Done"); 564 ppc64_boot_msg(0x15, "Setup Done");
613} 565}
@@ -672,7 +624,7 @@ void __init setup_per_cpu_areas(void)
672 size = PERCPU_ENOUGH_ROOM; 624 size = PERCPU_ENOUGH_ROOM;
673#endif 625#endif
674 626
675 for_each_cpu(i) { 627 for_each_possible_cpu(i) {
676 ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size); 628 ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
677 if (!ptr) 629 if (!ptr)
678 panic("Cannot allocate cpu data for CPU %d\n", i); 630 panic("Cannot allocate cpu data for CPU %d\n", i);
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index d7a4e814974d..01e3c08cb550 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -42,6 +42,7 @@
42 42
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
44#include <asm/cacheflush.h> 44#include <asm/cacheflush.h>
45#include <asm/syscalls.h>
45#include <asm/sigcontext.h> 46#include <asm/sigcontext.h>
46#include <asm/vdso.h> 47#include <asm/vdso.h>
47#ifdef CONFIG_PPC64 48#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 47f910380a6a..27f65b95184d 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -33,6 +33,7 @@
33#include <asm/pgtable.h> 33#include <asm/pgtable.h>
34#include <asm/unistd.h> 34#include <asm/unistd.h>
35#include <asm/cacheflush.h> 35#include <asm/cacheflush.h>
36#include <asm/syscalls.h>
36#include <asm/vdso.h> 37#include <asm/vdso.h>
37 38
38#define DEBUG_SIG 0 39#define DEBUG_SIG 0
@@ -211,7 +212,7 @@ static inline void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs
211 /* Default to using normal stack */ 212 /* Default to using normal stack */
212 newsp = regs->gpr[1]; 213 newsp = regs->gpr[1];
213 214
214 if (ka->sa.sa_flags & SA_ONSTACK) { 215 if ((ka->sa.sa_flags & SA_ONSTACK) && current->sas_ss_size) {
215 if (! on_sig_stack(regs->gpr[1])) 216 if (! on_sig_stack(regs->gpr[1]))
216 newsp = (current->sas_ss_sp + current->sas_ss_size); 217 newsp = (current->sas_ss_sp + current->sas_ss_size);
217 } 218 }
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 805eaedbc308..530f7dba0bd2 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -362,7 +362,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
362 362
363 smp_space_timers(max_cpus); 363 smp_space_timers(max_cpus);
364 364
365 for_each_cpu(cpu) 365 for_each_possible_cpu(cpu)
366 if (cpu != boot_cpuid) 366 if (cpu != boot_cpuid)
367 smp_create_idle(cpu); 367 smp_create_idle(cpu);
368} 368}
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
new file mode 100644
index 000000000000..69773cc1a85f
--- /dev/null
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -0,0 +1,349 @@
1#include <linux/config.h>
2#include <linux/threads.h>
3#include <asm/processor.h>
4#include <asm/page.h>
5#include <asm/cputable.h>
6#include <asm/thread_info.h>
7#include <asm/ppc_asm.h>
8#include <asm/asm-offsets.h>
9
10
11/*
 12 * Structure for storing CPU registers in the save area.
13 */
14#define SL_SP 0
15#define SL_PC 4
16#define SL_MSR 8
17#define SL_SDR1 0xc
18#define SL_SPRG0 0x10 /* 4 sprg's */
19#define SL_DBAT0 0x20
20#define SL_IBAT0 0x28
21#define SL_DBAT1 0x30
22#define SL_IBAT1 0x38
23#define SL_DBAT2 0x40
24#define SL_IBAT2 0x48
25#define SL_DBAT3 0x50
26#define SL_IBAT3 0x58
27#define SL_TB 0x60
28#define SL_R2 0x68
29#define SL_CR 0x6c
30#define SL_LR 0x70
31#define SL_R12 0x74 /* r12 to r31 */
32#define SL_SIZE (SL_R12 + 80)
33
34 .section .data
35 .align 5
36
37_GLOBAL(swsusp_save_area)
38 .space SL_SIZE
39
40
41 .section .text
42 .align 5
43
44_GLOBAL(swsusp_arch_suspend)
45
46 lis r11,swsusp_save_area@h
47 ori r11,r11,swsusp_save_area@l
48
49 mflr r0
50 stw r0,SL_LR(r11)
51 mfcr r0
52 stw r0,SL_CR(r11)
53 stw r1,SL_SP(r11)
54 stw r2,SL_R2(r11)
55 stmw r12,SL_R12(r11)
56
57 /* Save MSR & SDR1 */
58 mfmsr r4
59 stw r4,SL_MSR(r11)
60 mfsdr1 r4
61 stw r4,SL_SDR1(r11)
62
63 /* Get a stable timebase and save it */
641: mftbu r4
65 stw r4,SL_TB(r11)
66 mftb r5
67 stw r5,SL_TB+4(r11)
68 mftbu r3
69 cmpw r3,r4
70 bne 1b
71
72 /* Save SPRGs */
73 mfsprg r4,0
74 stw r4,SL_SPRG0(r11)
75 mfsprg r4,1
76 stw r4,SL_SPRG0+4(r11)
77 mfsprg r4,2
78 stw r4,SL_SPRG0+8(r11)
79 mfsprg r4,3
80 stw r4,SL_SPRG0+12(r11)
81
82 /* Save BATs */
83 mfdbatu r4,0
84 stw r4,SL_DBAT0(r11)
85 mfdbatl r4,0
86 stw r4,SL_DBAT0+4(r11)
87 mfdbatu r4,1
88 stw r4,SL_DBAT1(r11)
89 mfdbatl r4,1
90 stw r4,SL_DBAT1+4(r11)
91 mfdbatu r4,2
92 stw r4,SL_DBAT2(r11)
93 mfdbatl r4,2
94 stw r4,SL_DBAT2+4(r11)
95 mfdbatu r4,3
96 stw r4,SL_DBAT3(r11)
97 mfdbatl r4,3
98 stw r4,SL_DBAT3+4(r11)
99 mfibatu r4,0
100 stw r4,SL_IBAT0(r11)
101 mfibatl r4,0
102 stw r4,SL_IBAT0+4(r11)
103 mfibatu r4,1
104 stw r4,SL_IBAT1(r11)
105 mfibatl r4,1
106 stw r4,SL_IBAT1+4(r11)
107 mfibatu r4,2
108 stw r4,SL_IBAT2(r11)
109 mfibatl r4,2
110 stw r4,SL_IBAT2+4(r11)
111 mfibatu r4,3
112 stw r4,SL_IBAT3(r11)
113 mfibatl r4,3
114 stw r4,SL_IBAT3+4(r11)
115
116#if 0
117 /* Backup various CPU config stuffs */
118 bl __save_cpu_setup
119#endif
120 /* Call the low level suspend stuff (we should probably have made
 121	 * a stackframe...)
122 */
123 bl swsusp_save
124
125 /* Restore LR from the save area */
126 lis r11,swsusp_save_area@h
127 ori r11,r11,swsusp_save_area@l
128 lwz r0,SL_LR(r11)
129 mtlr r0
130
131 blr
132
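
The mftbu/mftb/mftbu sequence in swsusp_arch_suspend is the standard idiom for snapshotting the 64-bit timebase from 32-bit code: re-read the upper half and retry if it changed while the lower half was being read. In C it would look roughly like this (a sketch; mfspr() and SPRN_TBRU/SPRN_TBRL are the usual <asm/reg.h> accessors):

	/* C analogue of the timebase snapshot loop above (sketch only).
	 * The retry guards against TBL carrying into TBU between reads. */
	static inline u64 snapshot_tb(void)
	{
		u32 hi, lo, chk;

		do {
			hi  = mfspr(SPRN_TBRU);
			lo  = mfspr(SPRN_TBRL);
			chk = mfspr(SPRN_TBRU);
		} while (hi != chk);

		return ((u64)hi << 32) | lo;
	}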
133
134/* Resume code */
135_GLOBAL(swsusp_arch_resume)
136
 137	/* Stop pending altivec streams and memory accesses */
138BEGIN_FTR_SECTION
139 DSSALL
140END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
141 sync
142
143 /* Disable MSR:DR to make sure we don't take a TLB or
144 * hash miss during the copy, as our hash table will
145 * for a while be unuseable. For .text, we assume we are
146 * covered by a BAT. This works only for non-G5 at this
147 * point. G5 will need a better approach, possibly using
148 * a small temporary hash table filled with large mappings,
149 * disabling the MMU completely isn't a good option for
150 * performance reasons.
151 * (Note that 750's may have the same performance issue as
152 * the G5 in this case, we should investigate using moving
153 * BATs for these CPUs)
154 */
155 mfmsr r0
156 sync
157 rlwinm r0,r0,0,28,26 /* clear MSR_DR */
158 mtmsr r0
159 sync
160 isync
161
162 /* Load the pointer to the list of pages to copy */
163 lis r11,(pagedir_nosave - KERNELBASE)@h
164 ori r11,r11,pagedir_nosave@l
165 lwz r10,0(r11)
166
167 /* Copy the pages. This is a very basic implementation, to
168 * be replaced by something more cache-efficient */
1691:
170 tophys(r3,r10)
171 li r0,256
172 mtctr r0
173 lwz r11,pbe_address(r3) /* source */
174 tophys(r5,r11)
175 lwz r10,pbe_orig_address(r3) /* destination */
176 tophys(r6,r10)
1772:
178 lwz r8,0(r5)
179 lwz r9,4(r5)
180 lwz r10,8(r5)
181 lwz r11,12(r5)
182 addi r5,r5,16
183 stw r8,0(r6)
184 stw r9,4(r6)
185 stw r10,8(r6)
186 stw r11,12(r6)
187 addi r6,r6,16
188 bdnz 2b
189 lwz r10,pbe_next(r3)
190 cmpwi 0,r10,0
191 bne 1b
192
193 /* Do a very simple flush/invalidate of the L1 to ensure
194 * coherency of the icache
195 */
196 lis r3,0x0002
197 mtctr r3
198 li r3, 0
1991:
200 lwz r0,0(r3)
201 addi r3,r3,0x0020
202 bdnz 1b
203 isync
204 sync
205
206 /* Now flush those cache lines */
207 lis r3,0x0002
208 mtctr r3
209 li r3, 0
2101:
211 dcbf 0,r3
212 addi r3,r3,0x0020
213 bdnz 1b
214 sync
215
216 /* OK, we are now running with the kernel data of the old
217 * kernel fully restored. We can get to the save area
218 * easily now. As for the rest of the code, it assumes the
219 * loader kernel and the booted one are exactly identical.
220 */
221 lis r11,swsusp_save_area@h
222 ori r11,r11,swsusp_save_area@l
223 tophys(r11,r11)
224
225#if 0
226 /* Restore various CPU configuration state */
227 bl __restore_cpu_setup
228#endif
229 /* Restore the BATs and SDR1. Then we can turn on the MMU.
230 * This is a bit hairy as we are running out of those BATs,
231 * but our code is probably in the icache, and we are
232 * writing the same values back to the BATs, so that should be
233 * fine, though a better solution will have to be found long-term.
234 */
235 lwz r4,SL_SDR1(r11)
236 mtsdr1 r4
237 lwz r4,SL_SPRG0(r11)
238 mtsprg 0,r4
239 lwz r4,SL_SPRG0+4(r11)
240 mtsprg 1,r4
241 lwz r4,SL_SPRG0+8(r11)
242 mtsprg 2,r4
243 lwz r4,SL_SPRG0+12(r11)
244 mtsprg 3,r4
245
246#if 0
247 lwz r4,SL_DBAT0(r11)
248 mtdbatu 0,r4
249 lwz r4,SL_DBAT0+4(r11)
250 mtdbatl 0,r4
251 lwz r4,SL_DBAT1(r11)
252 mtdbatu 1,r4
253 lwz r4,SL_DBAT1+4(r11)
254 mtdbatl 1,r4
255 lwz r4,SL_DBAT2(r11)
256 mtdbatu 2,r4
257 lwz r4,SL_DBAT2+4(r11)
258 mtdbatl 2,r4
259 lwz r4,SL_DBAT3(r11)
260 mtdbatu 3,r4
261 lwz r4,SL_DBAT3+4(r11)
262 mtdbatl 3,r4
263 lwz r4,SL_IBAT0(r11)
264 mtibatu 0,r4
265 lwz r4,SL_IBAT0+4(r11)
266 mtibatl 0,r4
267 lwz r4,SL_IBAT1(r11)
268 mtibatu 1,r4
269 lwz r4,SL_IBAT1+4(r11)
270 mtibatl 1,r4
271 lwz r4,SL_IBAT2(r11)
272 mtibatu 2,r4
273 lwz r4,SL_IBAT2+4(r11)
274 mtibatl 2,r4
275 lwz r4,SL_IBAT3(r11)
276 mtibatu 3,r4
277 lwz r4,SL_IBAT3+4(r11)
278 mtibatl 3,r4
279#endif
280
281BEGIN_FTR_SECTION
282 li r4,0
283 mtspr SPRN_DBAT4U,r4
284 mtspr SPRN_DBAT4L,r4
285 mtspr SPRN_DBAT5U,r4
286 mtspr SPRN_DBAT5L,r4
287 mtspr SPRN_DBAT6U,r4
288 mtspr SPRN_DBAT6L,r4
289 mtspr SPRN_DBAT7U,r4
290 mtspr SPRN_DBAT7L,r4
291 mtspr SPRN_IBAT4U,r4
292 mtspr SPRN_IBAT4L,r4
293 mtspr SPRN_IBAT5U,r4
294 mtspr SPRN_IBAT5L,r4
295 mtspr SPRN_IBAT6U,r4
296 mtspr SPRN_IBAT6L,r4
297 mtspr SPRN_IBAT7U,r4
298 mtspr SPRN_IBAT7L,r4
299END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
300
301 /* Flush all TLBs */
302 lis r4,0x1000
3031: addic. r4,r4,-0x1000
304 tlbie r4
305 bgt 1b
306 sync
307
308 /* restore the MSR and turn on the MMU */
309 lwz r3,SL_MSR(r11)
310 bl turn_on_mmu
311 tovirt(r11,r11)
312
313 /* Restore TB */
314 li r3,0
315 mttbl r3
316 lwz r3,SL_TB(r11)
317 lwz r4,SL_TB+4(r11)
318 mttbu r3
319 mttbl r4
320
321 /* Kick decrementer */
322 li r0,1
323 mtdec r0
324
325 /* Restore the callee-saved registers and return */
326 lwz r0,SL_CR(r11)
327 mtcr r0
328 lwz r2,SL_R2(r11)
329 lmw r12,SL_R12(r11)
330 lwz r1,SL_SP(r11)
331 lwz r0,SL_LR(r11)
332 mtlr r0
333
334 // XXX Note: we don't really need to call swsusp_resume
335
336 li r3,0
337 blr
338
339/* FIXME: This construct is actually not useful since we don't shut
340 * down the instruction MMU; we could just flip MSR:DR back on.
341 */
342turn_on_mmu:
343 mflr r4
344 mtsrr0 r4
345 mtsrr1 r3
346 sync
347 isync
348 rfi
349
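
For reference, the stable timebase read at file lines 64-70 above is the
standard idiom for sampling a 64-bit counter through two 32-bit reads. A
minimal C sketch, assuming only GCC inline asm; mftbu()/mftbl() are local
helpers here, not an existing kernel API:

	static inline unsigned int mftbu(void)
	{
		unsigned int v;
		asm volatile("mftbu %0" : "=r" (v));	/* upper 32 bits */
		return v;
	}

	static inline unsigned int mftbl(void)
	{
		unsigned int v;
		asm volatile("mftb %0" : "=r" (v));	/* lower 32 bits */
		return v;
	}

	static inline unsigned long long read_timebase(void)
	{
		unsigned int hi, lo;

		/* Re-read the upper half until it is stable, so a carry
		 * between the two halves cannot be missed. */
		do {
			hi = mftbu();
			lo = mftbl();
		} while (mftbu() != hi);

		return ((unsigned long long)hi << 32) | lo;
	}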
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index ad895c99813b..9b69d99a9103 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -40,6 +40,7 @@
40#include <asm/uaccess.h> 40#include <asm/uaccess.h>
41#include <asm/ipc.h> 41#include <asm/ipc.h>
42#include <asm/semaphore.h> 42#include <asm/semaphore.h>
43#include <asm/syscalls.h>
43#include <asm/time.h> 44#include <asm/time.h>
44#include <asm/unistd.h> 45#include <asm/unistd.h>
45 46
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 0f0c3a9ae2e5..73560ef6f802 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -65,20 +65,20 @@ static int __init smt_setup(void)
65 unsigned int cpu; 65 unsigned int cpu;
66 66
67 if (!cpu_has_feature(CPU_FTR_SMT)) 67 if (!cpu_has_feature(CPU_FTR_SMT))
68 return 1; 68 return -ENODEV;
69 69
70 options = find_path_device("/options"); 70 options = find_path_device("/options");
71 if (!options) 71 if (!options)
72 return 1; 72 return -ENODEV;
73 73
74 val = (unsigned int *)get_property(options, "ibm,smt-snooze-delay", 74 val = (unsigned int *)get_property(options, "ibm,smt-snooze-delay",
75 NULL); 75 NULL);
76 if (!smt_snooze_cmdline && val) { 76 if (!smt_snooze_cmdline && val) {
77 for_each_cpu(cpu) 77 for_each_possible_cpu(cpu)
78 per_cpu(smt_snooze_delay, cpu) = *val; 78 per_cpu(smt_snooze_delay, cpu) = *val;
79 } 79 }
80 80
81 return 1; 81 return 0;
82} 82}
83__initcall(smt_setup); 83__initcall(smt_setup);
84 84
@@ -93,7 +93,7 @@ static int __init setup_smt_snooze_delay(char *str)
93 smt_snooze_cmdline = 1; 93 smt_snooze_cmdline = 1;
94 94
95 if (get_option(&str, &snooze)) { 95 if (get_option(&str, &snooze)) {
96 for_each_cpu(cpu) 96 for_each_possible_cpu(cpu)
97 per_cpu(smt_snooze_delay, cpu) = snooze; 97 per_cpu(smt_snooze_delay, cpu) = snooze;
98 } 98 }
99 99
@@ -347,7 +347,7 @@ static int __init topology_init(void)
347 347
348 register_cpu_notifier(&sysfs_cpu_nb); 348 register_cpu_notifier(&sysfs_cpu_nb);
349 349
350 for_each_cpu(cpu) { 350 for_each_possible_cpu(cpu) {
351 struct cpu *c = &per_cpu(cpu_devices, cpu); 351 struct cpu *c = &per_cpu(cpu_devices, cpu);
352 352
353#ifdef CONFIG_NUMA 353#ifdef CONFIG_NUMA
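
The for_each_cpu() to for_each_possible_cpu() conversions in this hunk (and
in time.c further down) all follow one pattern: per-CPU defaults must be
written for every CPU that could ever come online, not only those online at
boot, so a later hotplug does not expose an uninitialized value. A minimal
sketch of the pattern, with a hypothetical per-CPU variable:

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned long, example_delay);

	static void set_example_delay(unsigned long val)
	{
		unsigned int cpu;

		/* possible CPUs, not merely the currently online ones */
		for_each_possible_cpu(cpu)
			per_cpu(example_delay, cpu) = val;
	}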
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
new file mode 100644
index 000000000000..26bd8ea35a4e
--- /dev/null
+++ b/arch/powerpc/kernel/tau_6xx.c
@@ -0,0 +1,271 @@
1/*
2 * tau_6xx.c Thermal management for CPUs with Thermal Assist Units
3 *
4 * Written by Troy Benjegerdes <hozer@drgw.net>
5 *
6 * TODO:
7 * dynamic power management to limit peak CPU temp (using ICTC)
8 * calibration???
9 *
10 * Silly, crazy ideas: use cpu load (from scheduler) and ICTC to extend battery
11 * life in portables, and add a 'performance/watt' metric somewhere in /proc
12 */
13
14#include <linux/config.h>
15#include <linux/errno.h>
16#include <linux/jiffies.h>
17#include <linux/kernel.h>
18#include <linux/param.h>
19#include <linux/string.h>
20#include <linux/mm.h>
21#include <linux/interrupt.h>
22#include <linux/init.h>
23
24#include <asm/io.h>
25#include <asm/reg.h>
26#include <asm/nvram.h>
27#include <asm/cache.h>
28#include <asm/8xx_immap.h>
29#include <asm/machdep.h>
30
31static struct tau_temp
32{
33 int interrupts;
34 unsigned char low;
35 unsigned char high;
36 unsigned char grew;
37} tau[NR_CPUS];
38
39struct timer_list tau_timer;
40
41#undef DEBUG
42
43/* TODO: put these in a /proc interface, with some sanity checks, and maybe
44 * dynamic adjustment to minimize # of interrupts */
45/* configurable values for step size and how much to expand the window when
46 * we get an interrupt. These are based on the limit that was out of range */
47#define step_size 2 /* step size when temp goes out of range */
48#define window_expand 1 /* expand the window by this much */
49/* configurable values for shrinking the window */
50#define shrink_timer 2*HZ /* period between shrinking the window */
51#define min_window 2 /* minimum window size, degrees C */
52
53void set_thresholds(unsigned long cpu)
54{
55#ifdef CONFIG_TAU_INT
56 /*
57 * setup THRM1,
58 * threshold, valid bit, enable interrupts, interrupt when below threshold
59 */
60 mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID);
61
62 /* setup THRM2,
63 * threshold, valid bit, enable interrupts, interrupt when above threshold
64 */
65 mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE);
66#else
67 /* same thing but don't enable interrupts */
68 mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID);
69 mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V);
70#endif
71}
72
73void TAUupdate(int cpu)
74{
75 unsigned thrm;
76
77#ifdef DEBUG
78 printk("TAUupdate ");
79#endif
80
81 /* if both thresholds are crossed, the step_sizes cancel out
82 * and the window winds up getting expanded twice. */
83 if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */
84 if(thrm & THRM1_TIN){ /* crossed low threshold */
85 if (tau[cpu].low >= step_size){
86 tau[cpu].low -= step_size;
87 tau[cpu].high -= (step_size - window_expand);
88 }
89 tau[cpu].grew = 1;
90#ifdef DEBUG
91 printk("low threshold crossed ");
92#endif
93 }
94 }
95 if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */
96 if(thrm & THRM1_TIN){ /* crossed high threshold */
97 if (tau[cpu].high <= 127-step_size){
98 tau[cpu].low += (step_size - window_expand);
99 tau[cpu].high += step_size;
100 }
101 tau[cpu].grew = 1;
102#ifdef DEBUG
103 printk("high threshold crossed ");
104#endif
105 }
106 }
107
108#ifdef DEBUG
109 printk("grew = %d\n", tau[cpu].grew);
110#endif
111
112#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */
113 set_thresholds(cpu);
114#endif
115
116}
117
118#ifdef CONFIG_TAU_INT
119/*
120 * TAU interrupts - called when we have a thermal assist unit interrupt
121 * with interrupts disabled
122 */
123
124void TAUException(struct pt_regs * regs)
125{
126 int cpu = smp_processor_id();
127
128 irq_enter();
129 tau[cpu].interrupts++;
130
131 TAUupdate(cpu);
132
133 irq_exit();
134}
135#endif /* CONFIG_TAU_INT */
136
137static void tau_timeout(void * info)
138{
139 int cpu;
140 unsigned long flags;
141 int size;
142 int shrink;
143
144 /* disabling interrupts *should* be okay */
145 local_irq_save(flags);
146 cpu = smp_processor_id();
147
148#ifndef CONFIG_TAU_INT
149 TAUupdate(cpu);
150#endif
151
152 size = tau[cpu].high - tau[cpu].low;
153 if (size > min_window && ! tau[cpu].grew) {
154 /* do an exponential shrink of half the amount currently over size */
155 shrink = (2 + size - min_window) / 4;
156 if (shrink) {
157 tau[cpu].low += shrink;
158 tau[cpu].high -= shrink;
159 } else { /* size must have been min_window + 1 */
160 tau[cpu].low += 1;
161#if 1 /* debug */
162 if ((tau[cpu].high - tau[cpu].low) != min_window){
163 printk(KERN_ERR "tau_6xx.c: line %d, logic error\n", __LINE__);
164 }
165#endif
166 }
167 }
168
169 tau[cpu].grew = 0;
170
171 set_thresholds(cpu);
172
173 /*
174 * Do the enable every time, since otherwise a bunch of (relatively)
175 * complex sleep code needs to be added. One mtspr every time
176 * tau_timeout is called is probably not a big deal.
177 *
178 * Enable the thermal sensor and set up the sample interval timer.
179 * We need 20 us to do the compare; until a nice 'cpu_speed' function
180 * call is implemented, just assume a 500 MHz clock. It doesn't really
181 * matter if we take too long for a compare since it's all interrupt
182 * driven anyway.
183 *
184 * Use an extra-long interval (60 us @ 500 MHz).
185 */
186 mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);
187
188 local_irq_restore(flags);
189}
190
191static void tau_timeout_smp(unsigned long unused)
192{
193
194 /* schedule ourselves to be run again */
195 mod_timer(&tau_timer, jiffies + shrink_timer);
196 on_each_cpu(tau_timeout, NULL, 1, 0);
197}
198
199/*
200 * Set up the TAU.
201 *
202 * Set things up to use THRM1 as the temperature lower bound and THRM2 as
203 * the upper bound; start with a wide window and let the timer shrink it.
204 */
205
206int tau_initialized = 0;
207
208void __init TAU_init_smp(void * info)
209{
210 unsigned long cpu = smp_processor_id();
211
212 /* set these to a reasonable value and let the timer shrink the
213 * window */
214 tau[cpu].low = 5;
215 tau[cpu].high = 120;
216
217 set_thresholds(cpu);
218}
219
220int __init TAU_init(void)
221{
222 /* We assume in SMP that if one CPU has TAU support, they
223 * all have it --BenH
224 */
225 if (!cpu_has_feature(CPU_FTR_TAU)) {
226 printk("Thermal assist unit not available\n");
227 tau_initialized = 0;
228 return 1;
229 }
230
231
232 /* first, set up the window shrinking timer */
233 init_timer(&tau_timer);
234 tau_timer.function = tau_timeout_smp;
235 tau_timer.expires = jiffies + shrink_timer;
236 add_timer(&tau_timer);
237
238 on_each_cpu(TAU_init_smp, NULL, 1, 0);
239
240 printk("Thermal assist unit ");
241#ifdef CONFIG_TAU_INT
242 printk("using interrupts, ");
243#else
244 printk("using timers, ");
245#endif
246 printk("shrink_timer: %d jiffies\n", shrink_timer);
247 tau_initialized = 1;
248
249 return 0;
250}
251
252__initcall(TAU_init);
253
254/*
255 * return current temp
256 */
257
258u32 cpu_temp_both(unsigned long cpu)
259{
260 return ((tau[cpu].high << 16) | tau[cpu].low);
261}
262
263int cpu_temp(unsigned long cpu)
264{
265 return ((tau[cpu].high + tau[cpu].low) / 2);
266}
267
268int tau_interrupts(unsigned long cpu)
269{
270 return (tau[cpu].interrupts);
271}
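
The window policy in tau_timeout() is the heart of the driver: every
shrink_timer period the [low, high] window closes in on the current
temperature by roughly half of the amount it exceeds min_window, unless a
threshold interrupt grew it during that period. A standalone sketch of that
step (illustration only, not kernel code):

	#define MIN_WINDOW 2	/* matches min_window above, degrees C */

	static void shrink_window(unsigned char *low, unsigned char *high,
				  int grew)
	{
		int size = *high - *low;
		int shrink;

		if (size <= MIN_WINDOW || grew)
			return;

		/* Each end moves in by (excess + 2) / 4, so the window as
		 * a whole closes by about half its excess over MIN_WINDOW. */
		shrink = (2 + size - MIN_WINDOW) / 4;
		if (shrink) {
			*low += shrink;
			*high -= shrink;
		} else {
			/* size was MIN_WINDOW + 1: close by one degree */
			*low += 1;
		}
	}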
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 4a27218a086c..24e3ad756de0 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -261,7 +261,7 @@ void snapshot_timebases(void)
261 261
262 if (!cpu_has_feature(CPU_FTR_PURR)) 262 if (!cpu_has_feature(CPU_FTR_PURR))
263 return; 263 return;
264 for_each_cpu(cpu) 264 for_each_possible_cpu(cpu)
265 spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock); 265 spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock);
266 on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1); 266 on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
267} 267}
@@ -751,7 +751,7 @@ void __init smp_space_timers(unsigned int max_cpus)
751 * systems works better if the two threads' timebase interrupts 751 * systems works better if the two threads' timebase interrupts
752 * are staggered by half a jiffy with respect to each other. 752 * are staggered by half a jiffy with respect to each other.
753 */ 753 */
754 for_each_cpu(i) { 754 for_each_possible_cpu(i) {
755 if (i == boot_cpuid) 755 if (i == boot_cpuid)
756 continue; 756 continue;
757 if (i == (boot_cpuid ^ 1)) 757 if (i == (boot_cpuid ^ 1))
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 9763faab6739..4cbde211eb69 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -97,7 +97,6 @@ static DEFINE_SPINLOCK(die_lock);
97int die(const char *str, struct pt_regs *regs, long err) 97int die(const char *str, struct pt_regs *regs, long err)
98{ 98{
99 static int die_counter, crash_dump_start = 0; 99 static int die_counter, crash_dump_start = 0;
100 int nl = 0;
101 100
102 if (debugger(regs)) 101 if (debugger(regs))
103 return 1; 102 return 1;
@@ -106,7 +105,7 @@ int die(const char *str, struct pt_regs *regs, long err)
106 spin_lock_irq(&die_lock); 105 spin_lock_irq(&die_lock);
107 bust_spinlocks(1); 106 bust_spinlocks(1);
108#ifdef CONFIG_PMAC_BACKLIGHT 107#ifdef CONFIG_PMAC_BACKLIGHT
109 if (_machine == _MACH_Pmac) { 108 if (machine_is(powermac)) {
110 set_backlight_enable(1); 109 set_backlight_enable(1);
111 set_backlight_level(BACKLIGHT_MAX); 110 set_backlight_level(BACKLIGHT_MAX);
112 } 111 }
@@ -114,46 +113,18 @@ int die(const char *str, struct pt_regs *regs, long err)
114 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); 113 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
115#ifdef CONFIG_PREEMPT 114#ifdef CONFIG_PREEMPT
116 printk("PREEMPT "); 115 printk("PREEMPT ");
117 nl = 1;
118#endif 116#endif
119#ifdef CONFIG_SMP 117#ifdef CONFIG_SMP
120 printk("SMP NR_CPUS=%d ", NR_CPUS); 118 printk("SMP NR_CPUS=%d ", NR_CPUS);
121 nl = 1;
122#endif 119#endif
123#ifdef CONFIG_DEBUG_PAGEALLOC 120#ifdef CONFIG_DEBUG_PAGEALLOC
124 printk("DEBUG_PAGEALLOC "); 121 printk("DEBUG_PAGEALLOC ");
125 nl = 1;
126#endif 122#endif
127#ifdef CONFIG_NUMA 123#ifdef CONFIG_NUMA
128 printk("NUMA "); 124 printk("NUMA ");
129 nl = 1;
130#endif 125#endif
131#ifdef CONFIG_PPC64 126 printk("%s\n", ppc_md.name ? ppc_md.name : "");
132 switch (_machine) { 127
133 case PLATFORM_PSERIES:
134 printk("PSERIES ");
135 nl = 1;
136 break;
137 case PLATFORM_PSERIES_LPAR:
138 printk("PSERIES LPAR ");
139 nl = 1;
140 break;
141 case PLATFORM_ISERIES_LPAR:
142 printk("ISERIES LPAR ");
143 nl = 1;
144 break;
145 case PLATFORM_POWERMAC:
146 printk("POWERMAC ");
147 nl = 1;
148 break;
149 case PLATFORM_CELL:
150 printk("CELL ");
151 nl = 1;
152 break;
153 }
154#endif
155 if (nl)
156 printk("\n");
157 print_modules(); 128 print_modules();
158 show_regs(regs); 129 show_regs(regs);
159 bust_spinlocks(0); 130 bust_spinlocks(0);
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index ec8370368423..573afb68d69e 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -33,6 +33,7 @@
33#include <asm/machdep.h> 33#include <asm/machdep.h>
34#include <asm/cputable.h> 34#include <asm/cputable.h>
35#include <asm/sections.h> 35#include <asm/sections.h>
36#include <asm/firmware.h>
36#include <asm/vdso.h> 37#include <asm/vdso.h>
37#include <asm/vdso_datapage.h> 38#include <asm/vdso_datapage.h>
38 39
@@ -667,7 +668,13 @@ void __init vdso_init(void)
667 vdso_data->version.major = SYSTEMCFG_MAJOR; 668 vdso_data->version.major = SYSTEMCFG_MAJOR;
668 vdso_data->version.minor = SYSTEMCFG_MINOR; 669 vdso_data->version.minor = SYSTEMCFG_MINOR;
669 vdso_data->processor = mfspr(SPRN_PVR); 670 vdso_data->processor = mfspr(SPRN_PVR);
670 vdso_data->platform = _machine; 671 /*
672 * Fake the old platform number for pSeries and iSeries and add
673 * in the LPAR bit if necessary
674 */
675 vdso_data->platform = machine_is(iseries) ? 0x200 : 0x100;
676 if (firmware_has_feature(FW_FEATURE_LPAR))
677 vdso_data->platform |= 1;
671 vdso_data->physicalMemorySize = lmb_phys_mem_size(); 678 vdso_data->physicalMemorySize = lmb_phys_mem_size();
672 vdso_data->dcache_size = ppc64_caches.dsize; 679 vdso_data->dcache_size = ppc64_caches.dsize;
673 vdso_data->dcache_line_size = ppc64_caches.dline_size; 680 vdso_data->dcache_line_size = ppc64_caches.dline_size;
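
The vdso.c hunk above keeps the legacy userspace-visible platform word
alive now that _machine is gone: 0x200 for iSeries, 0x100 for everything
else (the old pSeries value), with the low bit set when running under an
LPAR hypervisor. Restated as a hypothetical helper (names invented for
illustration; the constants are the ones in the hunk):

	static unsigned int legacy_platform_number(int is_iseries, int is_lpar)
	{
		unsigned int plat = is_iseries ? 0x200 : 0x100;

		if (is_lpar)
			plat |= 1;	/* the old *_LPAR platform bit */
		return plat;
	}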
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 7fa7b15fd8e6..fe79c2584cb0 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -1,9 +1,11 @@
1#include <linux/config.h> 1#include <linux/config.h>
2#ifdef CONFIG_PPC64 2#ifdef CONFIG_PPC64
3#include <asm/page.h> 3#include <asm/page.h>
4#define PROVIDE32(x) PROVIDE(__unused__##x)
4#else 5#else
5#define PAGE_SIZE 4096 6#define PAGE_SIZE 4096
6#define KERNELBASE CONFIG_KERNEL_START 7#define KERNELBASE CONFIG_KERNEL_START
8#define PROVIDE32(x) PROVIDE(x)
7#endif 9#endif
8#include <asm-generic/vmlinux.lds.h> 10#include <asm-generic/vmlinux.lds.h>
9 11
@@ -18,43 +20,42 @@ jiffies = jiffies_64 + 4;
18#endif 20#endif
19SECTIONS 21SECTIONS
20{ 22{
21 /* Sections to be discarded. */ 23 /* Sections to be discarded. */
22 /DISCARD/ : { 24 /DISCARD/ : {
23 *(.exitcall.exit) 25 *(.exitcall.exit)
24 *(.exit.data) 26 *(.exit.data)
25 } 27 }
26
27 . = KERNELBASE;
28
29 /* Read-only sections, merged into text segment: */
30 .text : {
31 *(.text .text.*)
32 SCHED_TEXT
33 LOCK_TEXT
34 KPROBES_TEXT
35 *(.fixup)
36#ifdef CONFIG_PPC32
37 *(.got1)
38 __got2_start = .;
39 *(.got2)
40 __got2_end = .;
41#else
42 . = ALIGN(PAGE_SIZE);
43 _etext = .;
44#endif
45 }
46#ifdef CONFIG_PPC32
47 _etext = .;
48 PROVIDE (etext = .);
49 28
50 RODATA 29 . = KERNELBASE;
51 .fini : { *(.fini) } =0
52 .ctors : { *(.ctors) }
53 .dtors : { *(.dtors) }
54 30
55 .fixup : { *(.fixup) } 31/*
56#endif 32 * Text, read only data and other permanent read-only sections
33 */
34
35 /* Text and gots */
36 .text : {
37 *(.text .text.*)
38 SCHED_TEXT
39 LOCK_TEXT
40 KPROBES_TEXT
41 *(.fixup)
57 42
43#ifdef CONFIG_PPC32
44 *(.got1)
45 __got2_start = .;
46 *(.got2)
47 __got2_end = .;
48#endif /* CONFIG_PPC32 */
49
50 . = ALIGN(PAGE_SIZE);
51 _etext = .;
52 PROVIDE32 (etext = .);
53 }
54
55 /* Read-only data */
56 RODATA
57
58 /* Exception & bug tables */
58 __ex_table : { 59 __ex_table : {
59 __start___ex_table = .; 60 __start___ex_table = .;
60 *(__ex_table) 61 *(__ex_table)
@@ -67,192 +68,172 @@ SECTIONS
67 __stop___bug_table = .; 68 __stop___bug_table = .;
68 } 69 }
69 70
70#ifdef CONFIG_PPC64 71/*
72 * Init sections discarded at runtime
73 */
74 . = ALIGN(PAGE_SIZE);
75 __init_begin = .;
76
77 .init.text : {
78 _sinittext = .;
79 *(.init.text)
80 _einittext = .;
81 }
82
83 /* .exit.text is discarded at runtime, not link time,
84 * to deal with references from __bug_table
85 */
86 .exit.text : { *(.exit.text) }
87
88 .init.data : {
89 *(.init.data);
90 __vtop_table_begin = .;
91 *(.vtop_fixup);
92 __vtop_table_end = .;
93 __ptov_table_begin = .;
94 *(.ptov_fixup);
95 __ptov_table_end = .;
96 }
97
98 . = ALIGN(16);
99 .init.setup : {
100 __setup_start = .;
101 *(.init.setup)
102 __setup_end = .;
103 }
104
105 .initcall.init : {
106 __initcall_start = .;
107 *(.initcall1.init)
108 *(.initcall2.init)
109 *(.initcall3.init)
110 *(.initcall4.init)
111 *(.initcall5.init)
112 *(.initcall6.init)
113 *(.initcall7.init)
114 __initcall_end = .;
115 }
116
117 .con_initcall.init : {
118 __con_initcall_start = .;
119 *(.con_initcall.init)
120 __con_initcall_end = .;
121 }
122
123 SECURITY_INIT
124
125 . = ALIGN(8);
71 __ftr_fixup : { 126 __ftr_fixup : {
72 __start___ftr_fixup = .; 127 __start___ftr_fixup = .;
73 *(__ftr_fixup) 128 *(__ftr_fixup)
74 __stop___ftr_fixup = .; 129 __stop___ftr_fixup = .;
75 } 130 }
76 131
77 RODATA 132 . = ALIGN(PAGE_SIZE);
78#endif 133 .init.ramfs : {
134 __initramfs_start = .;
135 *(.init.ramfs)
136 __initramfs_end = .;
137 }
79 138
80#ifdef CONFIG_PPC32 139#ifdef CONFIG_PPC32
81 /* Read-write section, merged into data segment: */ 140 . = ALIGN(32);
82 . = ALIGN(PAGE_SIZE); 141#else
83 _sdata = .; 142 . = ALIGN(128);
84 .data :
85 {
86 *(.data)
87 *(.data1)
88 *(.sdata)
89 *(.sdata2)
90 *(.got.plt) *(.got)
91 *(.dynamic)
92 CONSTRUCTORS
93 }
94
95 . = ALIGN(PAGE_SIZE);
96 __nosave_begin = .;
97 .data_nosave : { *(.data.nosave) }
98 . = ALIGN(PAGE_SIZE);
99 __nosave_end = .;
100
101 . = ALIGN(32);
102 .data.cacheline_aligned : { *(.data.cacheline_aligned) }
103
104 _edata = .;
105 PROVIDE (edata = .);
106
107 . = ALIGN(8192);
108 .data.init_task : { *(.data.init_task) }
109#endif 143#endif
144 .data.percpu : {
145 __per_cpu_start = .;
146 *(.data.percpu)
147 __per_cpu_end = .;
148 }
110 149
111 /* will be freed after init */ 150 . = ALIGN(8);
112 . = ALIGN(PAGE_SIZE); 151 .machine.desc : {
113 __init_begin = .; 152 __machine_desc_start = . ;
114 .init.text : { 153 *(.machine.desc)
115 _sinittext = .; 154 __machine_desc_end = . ;
116 *(.init.text) 155 }
117 _einittext = .; 156
118 } 157 /* freed after init ends here */
119#ifdef CONFIG_PPC32 158 . = ALIGN(PAGE_SIZE);
120 /* .exit.text is discarded at runtime, not link time, 159 __init_end = .;
121 to deal with references from __bug_table */ 160
122 .exit.text : { *(.exit.text) } 161/*
123#endif 162 * And now the various read/write data
124 .init.data : { 163 */
125 *(.init.data); 164
126 __vtop_table_begin = .; 165 . = ALIGN(PAGE_SIZE);
127 *(.vtop_fixup); 166 _sdata = .;
128 __vtop_table_end = .;
129 __ptov_table_begin = .;
130 *(.ptov_fixup);
131 __ptov_table_end = .;
132 }
133
134 . = ALIGN(16);
135 .init.setup : {
136 __setup_start = .;
137 *(.init.setup)
138 __setup_end = .;
139 }
140
141 .initcall.init : {
142 __initcall_start = .;
143 *(.initcall1.init)
144 *(.initcall2.init)
145 *(.initcall3.init)
146 *(.initcall4.init)
147 *(.initcall5.init)
148 *(.initcall6.init)
149 *(.initcall7.init)
150 __initcall_end = .;
151 }
152
153 .con_initcall.init : {
154 __con_initcall_start = .;
155 *(.con_initcall.init)
156 __con_initcall_end = .;
157 }
158
159 SECURITY_INIT
160 167
161#ifdef CONFIG_PPC32 168#ifdef CONFIG_PPC32
162 __start___ftr_fixup = .; 169 .data :
163 __ftr_fixup : { *(__ftr_fixup) } 170 {
164 __stop___ftr_fixup = .; 171 *(.data)
172 *(.sdata)
173 *(.got.plt) *(.got)
174 }
165#else 175#else
166 . = ALIGN(PAGE_SIZE); 176 .data : {
167 .init.ramfs : { 177 *(.data .data.rel* .toc1)
168 __initramfs_start = .; 178 *(.branch_lt)
169 *(.init.ramfs) 179 }
170 __initramfs_end = .;
171 }
172#endif
173 180
174#ifdef CONFIG_PPC32 181 .opd : {
175 . = ALIGN(32); 182 *(.opd)
183 }
184
185 .got : {
186 __toc_start = .;
187 *(.got)
188 *(.toc)
189 }
176#endif 190#endif
177 .data.percpu : {
178 __per_cpu_start = .;
179 *(.data.percpu)
180 __per_cpu_end = .;
181 }
182 191
183 . = ALIGN(PAGE_SIZE); 192 . = ALIGN(PAGE_SIZE);
184#ifdef CONFIG_PPC64 193 _edata = .;
185 . = ALIGN(16384); 194 PROVIDE32 (edata = .);
186 __init_end = .; 195
187 /* freed after init ends here */ 196 /* The initial task and kernel stack */
188 197#ifdef CONFIG_PPC32
189 /* Read/write sections */ 198 . = ALIGN(8192);
190 . = ALIGN(PAGE_SIZE);
191 . = ALIGN(16384);
192 _sdata = .;
193 /* The initial task and kernel stack */
194 .data.init_task : {
195 *(.data.init_task)
196 }
197
198 . = ALIGN(PAGE_SIZE);
199 .data.page_aligned : {
200 *(.data.page_aligned)
201 }
202
203 .data.cacheline_aligned : {
204 *(.data.cacheline_aligned)
205 }
206
207 .data : {
208 *(.data .data.rel* .toc1)
209 *(.branch_lt)
210 }
211
212 .opd : {
213 *(.opd)
214 }
215
216 .got : {
217 __toc_start = .;
218 *(.got)
219 *(.toc)
220 . = ALIGN(PAGE_SIZE);
221 _edata = .;
222 }
223
224 . = ALIGN(PAGE_SIZE);
225#else 199#else
226 __initramfs_start = .; 200 . = ALIGN(16384);
227 .init.ramfs : { 201#endif
228 *(.init.ramfs) 202 .data.init_task : {
229 } 203 *(.data.init_task)
230 __initramfs_end = .; 204 }
231 205
232 . = ALIGN(4096); 206 . = ALIGN(PAGE_SIZE);
233 __init_end = .; 207 .data.page_aligned : {
208 *(.data.page_aligned)
209 }
234 210
235 . = ALIGN(4096); 211 .data.cacheline_aligned : {
236 _sextratext = .; 212 *(.data.cacheline_aligned)
237 _eextratext = .; 213 }
238 214
239 __bss_start = .; 215 . = ALIGN(PAGE_SIZE);
240#endif 216 __data_nosave : {
217 __nosave_begin = .;
218 *(.data.nosave)
219 . = ALIGN(PAGE_SIZE);
220 __nosave_end = .;
221 }
241 222
242 .bss : { 223/*
243 __bss_start = .; 224 * And finally the bss
244 *(.sbss) *(.scommon) 225 */
245 *(.dynbss) 226
246 *(.bss) 227 .bss : {
247 *(COMMON) 228 __bss_start = .;
248 __bss_stop = .; 229 *(.sbss) *(.scommon)
249 } 230 *(.dynbss)
231 *(.bss)
232 *(COMMON)
233 __bss_stop = .;
234 }
250 235
251#ifdef CONFIG_PPC64 236 . = ALIGN(PAGE_SIZE);
252 . = ALIGN(PAGE_SIZE); 237 _end = . ;
253#endif 238 PROVIDE32 (end = .);
254 _end = . ;
255#ifdef CONFIG_PPC32
256 PROVIDE (end = .);
257#endif
258} 239}
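
A note on the PROVIDE32() macro threaded through the new script: it lets a
single set of symbol definitions serve both word sizes. A worked expansion
(illustration only):

	/*
	 * PROVIDE32(etext = .) after preprocessing:
	 *
	 *   CONFIG_PPC32:  PROVIDE(etext = .)
	 *   CONFIG_PPC64:  PROVIDE(__unused__etext = .)
	 *
	 * The 32-bit link keeps exporting the traditional etext, edata and
	 * end symbols, while the 64-bit link renames them out of the way,
	 * without needing an #ifdef around every definition.
	 */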