aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@g5.osdl.org>2005-11-10 10:37:51 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2005-11-10 10:37:51 -0500
commit3ae0af12b458461f36dfddb26e54056be32928dd (patch)
tree063059f24f42506ce2a86374a3b6e2b7a8ae3fcf /arch/powerpc
parent3b44f137b9a846c5452d9e6e1271b79b1dbcc942 (diff)
parent7c43ee40ec602db3fa27e6e2d4f092f06ab0901c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc-merge
Diffstat (limited to 'arch/powerpc')
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/kernel/Makefile13
-rw-r--r--arch/powerpc/kernel/asm-offsets.c1
-rw-r--r--arch/powerpc/kernel/cpu_setup_power4.S233
-rw-r--r--arch/powerpc/kernel/cputable.c19
-rw-r--r--arch/powerpc/kernel/firmware.c45
-rw-r--r--arch/powerpc/kernel/fpu.S24
-rw-r--r--arch/powerpc/kernel/head_64.S91
-rw-r--r--arch/powerpc/kernel/ioctl32.c49
-rw-r--r--arch/powerpc/kernel/irq.c478
-rw-r--r--arch/powerpc/kernel/lparcfg.c612
-rw-r--r--arch/powerpc/kernel/misc_32.S23
-rw-r--r--arch/powerpc/kernel/misc_64.S8
-rw-r--r--arch/powerpc/kernel/paca.c142
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c5
-rw-r--r--arch/powerpc/kernel/proc_ppc64.c126
-rw-r--r--arch/powerpc/kernel/prom.c35
-rw-r--r--arch/powerpc/kernel/prom_init.c187
-rw-r--r--arch/powerpc/kernel/rtas-proc.c2
-rw-r--r--arch/powerpc/kernel/rtas.c5
-rw-r--r--arch/powerpc/kernel/rtas_pci.c513
-rw-r--r--arch/powerpc/kernel/setup-common.c37
-rw-r--r--arch/powerpc/kernel/setup.h6
-rw-r--r--arch/powerpc/kernel/setup_32.c18
-rw-r--r--arch/powerpc/kernel/setup_64.c93
-rw-r--r--arch/powerpc/kernel/signal_32.c1
-rw-r--r--arch/powerpc/kernel/smp.c9
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c1
-rw-r--r--arch/powerpc/kernel/sysfs.c384
-rw-r--r--arch/powerpc/kernel/time.c31
-rw-r--r--arch/powerpc/kernel/traps.c2
-rw-r--r--arch/powerpc/lib/bitops.c2
-rw-r--r--arch/powerpc/mm/hash_utils_64.c38
-rw-r--r--arch/powerpc/mm/init_32.c3
-rw-r--r--arch/powerpc/mm/init_64.c20
-rw-r--r--arch/powerpc/mm/mem.c4
-rw-r--r--arch/powerpc/mm/pgtable_64.c7
-rw-r--r--arch/powerpc/mm/stab.c21
-rw-r--r--arch/powerpc/oprofile/op_model_power4.c3
-rw-r--r--arch/powerpc/platforms/chrp/setup.c4
-rw-r--r--arch/powerpc/platforms/iseries/irq.c24
-rw-r--r--arch/powerpc/platforms/iseries/misc.S1
-rw-r--r--arch/powerpc/platforms/iseries/setup.c27
-rw-r--r--arch/powerpc/platforms/maple/pci.c3
-rw-r--r--arch/powerpc/platforms/powermac/pci.c3
-rw-r--r--arch/powerpc/platforms/powermac/pic.c3
-rw-r--r--arch/powerpc/platforms/powermac/smp.c51
-rw-r--r--arch/powerpc/platforms/pseries/Makefile2
-rw-r--r--arch/powerpc/platforms/pseries/eeh.c1212
-rw-r--r--arch/powerpc/platforms/pseries/eeh_event.c155
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c3
-rw-r--r--arch/powerpc/platforms/pseries/pci.c3
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c2
-rw-r--r--arch/powerpc/platforms/pseries/rtasd.c8
-rw-r--r--arch/powerpc/platforms/pseries/scanlog.c235
-rw-r--r--arch/powerpc/platforms/pseries/setup.c8
-rw-r--r--arch/powerpc/platforms/pseries/smp.c5
-rw-r--r--arch/powerpc/platforms/pseries/xics.c7
-rw-r--r--arch/powerpc/sysdev/u3_iommu.c2
-rw-r--r--arch/powerpc/xmon/Makefile2
-rw-r--r--arch/powerpc/xmon/nonstdio.c134
-rw-r--r--arch/powerpc/xmon/nonstdio.h28
-rw-r--r--arch/powerpc/xmon/setjmp.S176
-rw-r--r--arch/powerpc/xmon/start_32.c235
-rw-r--r--arch/powerpc/xmon/start_64.c167
-rw-r--r--arch/powerpc/xmon/start_8xx.c255
-rw-r--r--arch/powerpc/xmon/subr_prf.c54
-rw-r--r--arch/powerpc/xmon/xmon.c50
68 files changed, 4981 insertions, 1173 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1493c7896fe3..ed31062029f7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -599,6 +599,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
599 def_bool y 599 def_bool y
600 depends on NEED_MULTIPLE_NODES 600 depends on NEED_MULTIPLE_NODES
601 601
602config ARCH_MEMORY_PROBE
603 def_bool y
604 depends on MEMORY_HOTPLUG
605
602# Some NUMA nodes have memory ranges that span 606# Some NUMA nodes have memory ranges that span
603# other nodes. Even though a pfn is valid and 607# other nodes. Even though a pfn is valid and
604# between a node's start and end pfns, it may not 608# between a node's start and end pfns, it may not
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index b3ae2993efb8..c04bbd320594 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -4,6 +4,7 @@
4 4
5ifeq ($(CONFIG_PPC64),y) 5ifeq ($(CONFIG_PPC64),y)
6EXTRA_CFLAGS += -mno-minimal-toc 6EXTRA_CFLAGS += -mno-minimal-toc
7CFLAGS_ioctl32.o += -Ifs/
7endif 8endif
8ifeq ($(CONFIG_PPC32),y) 9ifeq ($(CONFIG_PPC32),y)
9CFLAGS_prom_init.o += -fPIC 10CFLAGS_prom_init.o += -fPIC
@@ -11,15 +12,21 @@ CFLAGS_btext.o += -fPIC
11endif 12endif
12 13
13obj-y := semaphore.o cputable.o ptrace.o syscalls.o \ 14obj-y := semaphore.o cputable.o ptrace.o syscalls.o \
14 signal_32.o pmc.o 15 irq.o signal_32.o pmc.o
15obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \ 16obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
16 signal_64.o ptrace32.o systbl.o 17 signal_64.o ptrace32.o systbl.o \
18 paca.o ioctl32.o cpu_setup_power4.o \
19 firmware.o sysfs.o
17obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o 20obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
18obj-$(CONFIG_POWER4) += idle_power4.o 21obj-$(CONFIG_POWER4) += idle_power4.o
19obj-$(CONFIG_PPC_OF) += of_device.o 22obj-$(CONFIG_PPC_OF) += of_device.o
20obj-$(CONFIG_PPC_RTAS) += rtas.o 23procfs-$(CONFIG_PPC64) := proc_ppc64.o
24obj-$(CONFIG_PROC_FS) += $(procfs-y)
25rtaspci-$(CONFIG_PPC64) := rtas_pci.o
26obj-$(CONFIG_PPC_RTAS) += rtas.o $(rtaspci-y)
21obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o 27obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
22obj-$(CONFIG_RTAS_PROC) += rtas-proc.o 28obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
29obj-$(CONFIG_LPARCFG) += lparcfg.o
23obj-$(CONFIG_IBMVIO) += vio.o 30obj-$(CONFIG_IBMVIO) += vio.o
24obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o 31obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
25 32
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index b75757251994..8793102711a8 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -106,7 +106,6 @@ int main(void)
106 DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size)); 106 DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
107 DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size)); 107 DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
108 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page)); 108 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
109 DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
110 DEFINE(PLATFORM_LPAR, PLATFORM_LPAR); 109 DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
111 110
112 /* paca */ 111 /* paca */
diff --git a/arch/powerpc/kernel/cpu_setup_power4.S b/arch/powerpc/kernel/cpu_setup_power4.S
new file mode 100644
index 000000000000..cca942fe6115
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_setup_power4.S
@@ -0,0 +1,233 @@
1/*
2 * This file contains low level CPU setup functions.
3 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 *
10 */
11
12#include <linux/config.h>
13#include <asm/processor.h>
14#include <asm/page.h>
15#include <asm/cputable.h>
16#include <asm/ppc_asm.h>
17#include <asm/asm-offsets.h>
18#include <asm/cache.h>
19
20_GLOBAL(__970_cpu_preinit)
21 /*
22 * Do nothing if not running in HV mode
23 */
24 mfmsr r0
25 rldicl. r0,r0,4,63
26 beqlr
27
28 /*
29 * Deal only with PPC970 and PPC970FX.
30 */
31 mfspr r0,SPRN_PVR
32 srwi r0,r0,16
33 cmpwi r0,0x39
34 beq 1f
35 cmpwi r0,0x3c
36 beq 1f
37 cmpwi r0,0x44
38 bnelr
391:
40
41 /* Make sure HID4:rm_ci is off before MMU is turned off, that large
42 * pages are enabled with HID4:61 and clear HID5:DCBZ_size and
43 * HID5:DCBZ32_ill
44 */
45 li r0,0
46 mfspr r3,SPRN_HID4
47 rldimi r3,r0,40,23 /* clear bit 23 (rm_ci) */
48 rldimi r3,r0,2,61 /* clear bit 61 (lg_pg_en) */
49 sync
50 mtspr SPRN_HID4,r3
51 isync
52 sync
53 mfspr r3,SPRN_HID5
54 rldimi r3,r0,6,56 /* clear bits 56 & 57 (DCBZ*) */
55 sync
56 mtspr SPRN_HID5,r3
57 isync
58 sync
59
60 /* Setup some basic HID1 features */
61 mfspr r0,SPRN_HID1
62 li r3,0x1200 /* enable i-fetch cacheability */
63 sldi r3,r3,44 /* and prefetch */
64 or r0,r0,r3
65 mtspr SPRN_HID1,r0
66 mtspr SPRN_HID1,r0
67 isync
68
69 /* Clear HIOR */
70 li r0,0
71 sync
72 mtspr SPRN_HIOR,0 /* Clear interrupt prefix */
73 isync
74 blr
75
76_GLOBAL(__setup_cpu_power4)
77 blr
78
79_GLOBAL(__setup_cpu_be)
80 /* Set large page sizes LP=0: 16MB, LP=1: 64KB */
81 addi r3, 0, 0
82 ori r3, r3, HID6_LB
83 sldi r3, r3, 32
84 nor r3, r3, r3
85 mfspr r4, SPRN_HID6
86 and r4, r4, r3
87 addi r3, 0, 0x02000
88 sldi r3, r3, 32
89 or r4, r4, r3
90 mtspr SPRN_HID6, r4
91 blr
92
93_GLOBAL(__setup_cpu_ppc970)
94 mfspr r0,SPRN_HID0
95 li r11,5 /* clear DOZE and SLEEP */
96 rldimi r0,r11,52,8 /* set NAP and DPM */
97 mtspr SPRN_HID0,r0
98 mfspr r0,SPRN_HID0
99 mfspr r0,SPRN_HID0
100 mfspr r0,SPRN_HID0
101 mfspr r0,SPRN_HID0
102 mfspr r0,SPRN_HID0
103 mfspr r0,SPRN_HID0
104 sync
105 isync
106 blr
107
108/* Definitions for the table use to save CPU states */
109#define CS_HID0 0
110#define CS_HID1 8
111#define CS_HID4 16
112#define CS_HID5 24
113#define CS_SIZE 32
114
115 .data
116 .balign L1_CACHE_BYTES,0
117cpu_state_storage:
118 .space CS_SIZE
119 .balign L1_CACHE_BYTES,0
120 .text
121
122/* Called in normal context to backup CPU 0 state. This
123 * does not include cache settings. This function is also
124 * called for machine sleep. This does not include the MMU
125 * setup, BATs, etc... but rather the "special" registers
126 * like HID0, HID1, HID4, etc...
127 */
128_GLOBAL(__save_cpu_setup)
129 /* Some CR fields are volatile, we back it up all */
130 mfcr r7
131
132 /* Get storage ptr */
133 LOADADDR(r5,cpu_state_storage)
134
135 /* We only deal with 970 for now */
136 mfspr r0,SPRN_PVR
137 srwi r0,r0,16
138 cmpwi r0,0x39
139 beq 1f
140 cmpwi r0,0x3c
141 beq 1f
142 cmpwi r0,0x44
143 bne 2f
144
1451: /* Save HID0,1,4 and 5 */
146 mfspr r3,SPRN_HID0
147 std r3,CS_HID0(r5)
148 mfspr r3,SPRN_HID1
149 std r3,CS_HID1(r5)
150 mfspr r3,SPRN_HID4
151 std r3,CS_HID4(r5)
152 mfspr r3,SPRN_HID5
153 std r3,CS_HID5(r5)
154
1552:
156 mtcr r7
157 blr
158
159/* Called with no MMU context (typically MSR:IR/DR off) to
160 * restore CPU state as backed up by the previous
161 * function. This does not include cache setting
162 */
163_GLOBAL(__restore_cpu_setup)
164 /* Get storage ptr (FIXME when using anton reloc as we
165 * are running with translation disabled here
166 */
167 LOADADDR(r5,cpu_state_storage)
168
169 /* We only deal with 970 for now */
170 mfspr r0,SPRN_PVR
171 srwi r0,r0,16
172 cmpwi r0,0x39
173 beq 1f
174 cmpwi r0,0x3c
175 beq 1f
176 cmpwi r0,0x44
177 bnelr
178
1791: /* Before accessing memory, we make sure rm_ci is clear */
180 li r0,0
181 mfspr r3,SPRN_HID4
182 rldimi r3,r0,40,23 /* clear bit 23 (rm_ci) */
183 sync
184 mtspr SPRN_HID4,r3
185 isync
186 sync
187
188 /* Clear interrupt prefix */
189 li r0,0
190 sync
191 mtspr SPRN_HIOR,0
192 isync
193
194 /* Restore HID0 */
195 ld r3,CS_HID0(r5)
196 sync
197 isync
198 mtspr SPRN_HID0,r3
199 mfspr r3,SPRN_HID0
200 mfspr r3,SPRN_HID0
201 mfspr r3,SPRN_HID0
202 mfspr r3,SPRN_HID0
203 mfspr r3,SPRN_HID0
204 mfspr r3,SPRN_HID0
205 sync
206 isync
207
208 /* Restore HID1 */
209 ld r3,CS_HID1(r5)
210 sync
211 isync
212 mtspr SPRN_HID1,r3
213 mtspr SPRN_HID1,r3
214 sync
215 isync
216
217 /* Restore HID4 */
218 ld r3,CS_HID4(r5)
219 sync
220 isync
221 mtspr SPRN_HID4,r3
222 sync
223 isync
224
225 /* Restore HID5 */
226 ld r3,CS_HID5(r5)
227 sync
228 isync
229 mtspr SPRN_HID5,r3
230 sync
231 isync
232 blr
233
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index cc4e9eb1c13f..1d85cedbbb7b 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -52,6 +52,9 @@ extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
52#define COMMON_USER (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \ 52#define COMMON_USER (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \
53 PPC_FEATURE_HAS_MMU) 53 PPC_FEATURE_HAS_MMU)
54#define COMMON_USER_PPC64 (COMMON_USER | PPC_FEATURE_64) 54#define COMMON_USER_PPC64 (COMMON_USER | PPC_FEATURE_64)
55#define COMMON_USER_POWER4 (COMMON_USER_PPC64 | PPC_FEATURE_POWER4)
56#define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5)
57#define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS)
55 58
56 59
57/* We only set the spe features if the kernel was compiled with 60/* We only set the spe features if the kernel was compiled with
@@ -160,7 +163,7 @@ struct cpu_spec cpu_specs[] = {
160 .pvr_value = 0x00350000, 163 .pvr_value = 0x00350000,
161 .cpu_name = "POWER4 (gp)", 164 .cpu_name = "POWER4 (gp)",
162 .cpu_features = CPU_FTRS_POWER4, 165 .cpu_features = CPU_FTRS_POWER4,
163 .cpu_user_features = COMMON_USER_PPC64, 166 .cpu_user_features = COMMON_USER_POWER4,
164 .icache_bsize = 128, 167 .icache_bsize = 128,
165 .dcache_bsize = 128, 168 .dcache_bsize = 128,
166 .num_pmcs = 8, 169 .num_pmcs = 8,
@@ -175,7 +178,7 @@ struct cpu_spec cpu_specs[] = {
175 .pvr_value = 0x00380000, 178 .pvr_value = 0x00380000,
176 .cpu_name = "POWER4+ (gq)", 179 .cpu_name = "POWER4+ (gq)",
177 .cpu_features = CPU_FTRS_POWER4, 180 .cpu_features = CPU_FTRS_POWER4,
178 .cpu_user_features = COMMON_USER_PPC64, 181 .cpu_user_features = COMMON_USER_POWER4,
179 .icache_bsize = 128, 182 .icache_bsize = 128,
180 .dcache_bsize = 128, 183 .dcache_bsize = 128,
181 .num_pmcs = 8, 184 .num_pmcs = 8,
@@ -190,7 +193,7 @@ struct cpu_spec cpu_specs[] = {
190 .pvr_value = 0x00390000, 193 .pvr_value = 0x00390000,
191 .cpu_name = "PPC970", 194 .cpu_name = "PPC970",
192 .cpu_features = CPU_FTRS_PPC970, 195 .cpu_features = CPU_FTRS_PPC970,
193 .cpu_user_features = COMMON_USER_PPC64 | 196 .cpu_user_features = COMMON_USER_POWER4 |
194 PPC_FEATURE_HAS_ALTIVEC_COMP, 197 PPC_FEATURE_HAS_ALTIVEC_COMP,
195 .icache_bsize = 128, 198 .icache_bsize = 128,
196 .dcache_bsize = 128, 199 .dcache_bsize = 128,
@@ -212,7 +215,7 @@ struct cpu_spec cpu_specs[] = {
212#else 215#else
213 .cpu_features = CPU_FTRS_PPC970, 216 .cpu_features = CPU_FTRS_PPC970,
214#endif 217#endif
215 .cpu_user_features = COMMON_USER_PPC64 | 218 .cpu_user_features = COMMON_USER_POWER4 |
216 PPC_FEATURE_HAS_ALTIVEC_COMP, 219 PPC_FEATURE_HAS_ALTIVEC_COMP,
217 .icache_bsize = 128, 220 .icache_bsize = 128,
218 .dcache_bsize = 128, 221 .dcache_bsize = 128,
@@ -230,7 +233,7 @@ struct cpu_spec cpu_specs[] = {
230 .pvr_value = 0x00440000, 233 .pvr_value = 0x00440000,
231 .cpu_name = "PPC970MP", 234 .cpu_name = "PPC970MP",
232 .cpu_features = CPU_FTRS_PPC970, 235 .cpu_features = CPU_FTRS_PPC970,
233 .cpu_user_features = COMMON_USER_PPC64 | 236 .cpu_user_features = COMMON_USER_POWER4 |
234 PPC_FEATURE_HAS_ALTIVEC_COMP, 237 PPC_FEATURE_HAS_ALTIVEC_COMP,
235 .icache_bsize = 128, 238 .icache_bsize = 128,
236 .dcache_bsize = 128, 239 .dcache_bsize = 128,
@@ -245,7 +248,7 @@ struct cpu_spec cpu_specs[] = {
245 .pvr_value = 0x003a0000, 248 .pvr_value = 0x003a0000,
246 .cpu_name = "POWER5 (gr)", 249 .cpu_name = "POWER5 (gr)",
247 .cpu_features = CPU_FTRS_POWER5, 250 .cpu_features = CPU_FTRS_POWER5,
248 .cpu_user_features = COMMON_USER_PPC64, 251 .cpu_user_features = COMMON_USER_POWER5,
249 .icache_bsize = 128, 252 .icache_bsize = 128,
250 .dcache_bsize = 128, 253 .dcache_bsize = 128,
251 .num_pmcs = 6, 254 .num_pmcs = 6,
@@ -260,7 +263,7 @@ struct cpu_spec cpu_specs[] = {
260 .pvr_value = 0x003b0000, 263 .pvr_value = 0x003b0000,
261 .cpu_name = "POWER5 (gs)", 264 .cpu_name = "POWER5 (gs)",
262 .cpu_features = CPU_FTRS_POWER5, 265 .cpu_features = CPU_FTRS_POWER5,
263 .cpu_user_features = COMMON_USER_PPC64, 266 .cpu_user_features = COMMON_USER_POWER5_PLUS,
264 .icache_bsize = 128, 267 .icache_bsize = 128,
265 .dcache_bsize = 128, 268 .dcache_bsize = 128,
266 .num_pmcs = 6, 269 .num_pmcs = 6,
@@ -276,7 +279,7 @@ struct cpu_spec cpu_specs[] = {
276 .cpu_name = "Cell Broadband Engine", 279 .cpu_name = "Cell Broadband Engine",
277 .cpu_features = CPU_FTRS_CELL, 280 .cpu_features = CPU_FTRS_CELL,
278 .cpu_user_features = COMMON_USER_PPC64 | 281 .cpu_user_features = COMMON_USER_PPC64 |
279 PPC_FEATURE_HAS_ALTIVEC_COMP, 282 PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP,
280 .icache_bsize = 128, 283 .icache_bsize = 128,
281 .dcache_bsize = 128, 284 .dcache_bsize = 128,
282 .cpu_setup = __setup_cpu_be, 285 .cpu_setup = __setup_cpu_be,
diff --git a/arch/powerpc/kernel/firmware.c b/arch/powerpc/kernel/firmware.c
new file mode 100644
index 000000000000..65eae752a527
--- /dev/null
+++ b/arch/powerpc/kernel/firmware.c
@@ -0,0 +1,45 @@
1/*
2 * Extracted from cputable.c
3 *
4 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
5 *
6 * Modifications for ppc64:
7 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
8 * Copyright (C) 2005 Stephen Rothwell, IBM Corporation
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/config.h>
17
18#include <asm/firmware.h>
19
20unsigned long ppc64_firmware_features;
21
22#ifdef CONFIG_PPC_PSERIES
23firmware_feature_t firmware_features_table[FIRMWARE_MAX_FEATURES] = {
24 {FW_FEATURE_PFT, "hcall-pft"},
25 {FW_FEATURE_TCE, "hcall-tce"},
26 {FW_FEATURE_SPRG0, "hcall-sprg0"},
27 {FW_FEATURE_DABR, "hcall-dabr"},
28 {FW_FEATURE_COPY, "hcall-copy"},
29 {FW_FEATURE_ASR, "hcall-asr"},
30 {FW_FEATURE_DEBUG, "hcall-debug"},
31 {FW_FEATURE_PERF, "hcall-perf"},
32 {FW_FEATURE_DUMP, "hcall-dump"},
33 {FW_FEATURE_INTERRUPT, "hcall-interrupt"},
34 {FW_FEATURE_MIGRATE, "hcall-migrate"},
35 {FW_FEATURE_PERFMON, "hcall-perfmon"},
36 {FW_FEATURE_CRQ, "hcall-crq"},
37 {FW_FEATURE_VIO, "hcall-vio"},
38 {FW_FEATURE_RDMA, "hcall-rdma"},
39 {FW_FEATURE_LLAN, "hcall-lLAN"},
40 {FW_FEATURE_BULK, "hcall-bulk"},
41 {FW_FEATURE_XDABR, "hcall-xdabr"},
42 {FW_FEATURE_MULTITCE, "hcall-multi-tce"},
43 {FW_FEATURE_SPLPAR, "hcall-splpar"},
44};
45#endif
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 4d6001fa1cf2..b780b42c95fc 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -41,20 +41,20 @@ _GLOBAL(load_up_fpu)
41#ifndef CONFIG_SMP 41#ifndef CONFIG_SMP
42 LOADBASE(r3, last_task_used_math) 42 LOADBASE(r3, last_task_used_math)
43 toreal(r3) 43 toreal(r3)
44 LDL r4,OFF(last_task_used_math)(r3) 44 PPC_LL r4,OFF(last_task_used_math)(r3)
45 CMPI 0,r4,0 45 PPC_LCMPI 0,r4,0
46 beq 1f 46 beq 1f
47 toreal(r4) 47 toreal(r4)
48 addi r4,r4,THREAD /* want last_task_used_math->thread */ 48 addi r4,r4,THREAD /* want last_task_used_math->thread */
49 SAVE_32FPRS(0, r4) 49 SAVE_32FPRS(0, r4)
50 mffs fr0 50 mffs fr0
51 stfd fr0,THREAD_FPSCR(r4) 51 stfd fr0,THREAD_FPSCR(r4)
52 LDL r5,PT_REGS(r4) 52 PPC_LL r5,PT_REGS(r4)
53 toreal(r5) 53 toreal(r5)
54 LDL r4,_MSR-STACK_FRAME_OVERHEAD(r5) 54 PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
55 li r10,MSR_FP|MSR_FE0|MSR_FE1 55 li r10,MSR_FP|MSR_FE0|MSR_FE1
56 andc r4,r4,r10 /* disable FP for previous task */ 56 andc r4,r4,r10 /* disable FP for previous task */
57 STL r4,_MSR-STACK_FRAME_OVERHEAD(r5) 57 PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
581: 581:
59#endif /* CONFIG_SMP */ 59#endif /* CONFIG_SMP */
60 /* enable use of FP after return */ 60 /* enable use of FP after return */
@@ -77,7 +77,7 @@ _GLOBAL(load_up_fpu)
77#ifndef CONFIG_SMP 77#ifndef CONFIG_SMP
78 subi r4,r5,THREAD 78 subi r4,r5,THREAD
79 fromreal(r4) 79 fromreal(r4)
80 STL r4,OFF(last_task_used_math)(r3) 80 PPC_STL r4,OFF(last_task_used_math)(r3)
81#endif /* CONFIG_SMP */ 81#endif /* CONFIG_SMP */
82 /* restore registers and return */ 82 /* restore registers and return */
83 /* we haven't used ctr or xer or lr */ 83 /* we haven't used ctr or xer or lr */
@@ -97,24 +97,24 @@ _GLOBAL(giveup_fpu)
97 MTMSRD(r5) /* enable use of fpu now */ 97 MTMSRD(r5) /* enable use of fpu now */
98 SYNC_601 98 SYNC_601
99 isync 99 isync
100 CMPI 0,r3,0 100 PPC_LCMPI 0,r3,0
101 beqlr- /* if no previous owner, done */ 101 beqlr- /* if no previous owner, done */
102 addi r3,r3,THREAD /* want THREAD of task */ 102 addi r3,r3,THREAD /* want THREAD of task */
103 LDL r5,PT_REGS(r3) 103 PPC_LL r5,PT_REGS(r3)
104 CMPI 0,r5,0 104 PPC_LCMPI 0,r5,0
105 SAVE_32FPRS(0, r3) 105 SAVE_32FPRS(0, r3)
106 mffs fr0 106 mffs fr0
107 stfd fr0,THREAD_FPSCR(r3) 107 stfd fr0,THREAD_FPSCR(r3)
108 beq 1f 108 beq 1f
109 LDL r4,_MSR-STACK_FRAME_OVERHEAD(r5) 109 PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
110 li r3,MSR_FP|MSR_FE0|MSR_FE1 110 li r3,MSR_FP|MSR_FE0|MSR_FE1
111 andc r4,r4,r3 /* disable FP for previous task */ 111 andc r4,r4,r3 /* disable FP for previous task */
112 STL r4,_MSR-STACK_FRAME_OVERHEAD(r5) 112 PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1131: 1131:
114#ifndef CONFIG_SMP 114#ifndef CONFIG_SMP
115 li r5,0 115 li r5,0
116 LOADBASE(r4,last_task_used_math) 116 LOADBASE(r4,last_task_used_math)
117 STL r5,OFF(last_task_used_math)(r4) 117 PPC_STL r5,OFF(last_task_used_math)(r4)
118#endif /* CONFIG_SMP */ 118#endif /* CONFIG_SMP */
119 blr 119 blr
120 120
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 16ab40daa738..8a8bf79ef044 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -28,7 +28,6 @@
28#include <asm/reg.h> 28#include <asm/reg.h>
29#include <asm/page.h> 29#include <asm/page.h>
30#include <asm/mmu.h> 30#include <asm/mmu.h>
31#include <asm/systemcfg.h>
32#include <asm/ppc_asm.h> 31#include <asm/ppc_asm.h>
33#include <asm/asm-offsets.h> 32#include <asm/asm-offsets.h>
34#include <asm/bug.h> 33#include <asm/bug.h>
@@ -1697,25 +1696,14 @@ _GLOBAL(pmac_secondary_start)
1697 * SPRG3 = paca virtual address 1696 * SPRG3 = paca virtual address
1698 */ 1697 */
1699_GLOBAL(__secondary_start) 1698_GLOBAL(__secondary_start)
1699 /* Set thread priority to MEDIUM */
1700 HMT_MEDIUM
1700 1701
1701 HMT_MEDIUM /* Set thread priority to MEDIUM */ 1702 /* Load TOC */
1702
1703 ld r2,PACATOC(r13) 1703 ld r2,PACATOC(r13)
1704 li r6,0 1704
1705 stb r6,PACAPROCENABLED(r13) 1705 /* Do early setup for that CPU (stab, slb, hash table pointer) */
1706 1706 bl .early_setup_secondary
1707#ifndef CONFIG_PPC_ISERIES
1708 /* Initialize the page table pointer register. */
1709 LOADADDR(r6,_SDR1)
1710 ld r6,0(r6) /* get the value of _SDR1 */
1711 mtspr SPRN_SDR1,r6 /* set the htab location */
1712#endif
1713 /* Initialize the first segment table (or SLB) entry */
1714 ld r3,PACASTABVIRT(r13) /* get addr of segment table */
1715BEGIN_FTR_SECTION
1716 bl .stab_initialize
1717END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
1718 bl .slb_initialize
1719 1707
1720 /* Initialize the kernel stack. Just a repeat for iSeries. */ 1708 /* Initialize the kernel stack. Just a repeat for iSeries. */
1721 LOADADDR(r3,current_set) 1709 LOADADDR(r3,current_set)
@@ -1724,37 +1712,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
1724 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD 1712 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
1725 std r1,PACAKSAVE(r13) 1713 std r1,PACAKSAVE(r13)
1726 1714
1727 ld r3,PACASTABREAL(r13) /* get raddr of segment table */ 1715 /* Clear backchain so we get nice backtraces */
1728 ori r4,r3,1 /* turn on valid bit */
1729
1730#ifdef CONFIG_PPC_ISERIES
1731 li r0,-1 /* hypervisor call */
1732 li r3,1
1733 sldi r3,r3,63 /* 0x8000000000000000 */
1734 ori r3,r3,4 /* 0x8000000000000004 */
1735 sc /* HvCall_setASR */
1736#else
1737 /* set the ASR */
1738 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1739 ld r3,0(r3)
1740 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1741 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
1742 beq 98f /* branch if result is 0 */
1743 mfspr r3,SPRN_PVR
1744 srwi r3,r3,16
1745 cmpwi r3,0x37 /* SStar */
1746 beq 97f
1747 cmpwi r3,0x36 /* IStar */
1748 beq 97f
1749 cmpwi r3,0x34 /* Pulsar */
1750 bne 98f
175197: li r3,H_SET_ASR /* hcall = H_SET_ASR */
1752 HVSC /* Invoking hcall */
1753 b 99f
175498: /* !(rpa hypervisor) || !(star) */
1755 mtasr r4 /* set the stab location */
175699:
1757#endif
1758 li r7,0 1716 li r7,0
1759 mtlr r7 1717 mtlr r7
1760 1718
@@ -1777,6 +1735,7 @@ _GLOBAL(start_secondary_prolog)
1777 li r3,0 1735 li r3,0
1778 std r3,0(r1) /* Zero the stack frame pointer */ 1736 std r3,0(r1) /* Zero the stack frame pointer */
1779 bl .start_secondary 1737 bl .start_secondary
1738 b .
1780#endif 1739#endif
1781 1740
1782/* 1741/*
@@ -1896,40 +1855,6 @@ _STATIC(start_here_multiplatform)
1896 mr r3,r31 1855 mr r3,r31
1897 bl .early_setup 1856 bl .early_setup
1898 1857
1899 /* set the ASR */
1900 ld r3,PACASTABREAL(r13)
1901 ori r4,r3,1 /* turn on valid bit */
1902 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1903 ld r3,0(r3)
1904 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1905 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
1906 beq 98f /* branch if result is 0 */
1907 mfspr r3,SPRN_PVR
1908 srwi r3,r3,16
1909 cmpwi r3,0x37 /* SStar */
1910 beq 97f
1911 cmpwi r3,0x36 /* IStar */
1912 beq 97f
1913 cmpwi r3,0x34 /* Pulsar */
1914 bne 98f
191597: li r3,H_SET_ASR /* hcall = H_SET_ASR */
1916 HVSC /* Invoking hcall */
1917 b 99f
191898: /* !(rpa hypervisor) || !(star) */
1919 mtasr r4 /* set the stab location */
192099:
1921 /* Set SDR1 (hash table pointer) */
1922 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1923 ld r3,0(r3)
1924 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1925 /* Test if bit 0 is set (LPAR bit) */
1926 andi. r3,r3,PLATFORM_LPAR
1927 bne 98f /* branch if result is !0 */
1928 LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
1929 add r6,r6,r26
1930 ld r6,0(r6) /* get the value of _SDR1 */
1931 mtspr SPRN_SDR1,r6 /* set the htab location */
193298:
1933 LOADADDR(r3,.start_here_common) 1858 LOADADDR(r3,.start_here_common)
1934 SET_REG_TO_CONST(r4, MSR_KERNEL) 1859 SET_REG_TO_CONST(r4, MSR_KERNEL)
1935 mtspr SPRN_SRR0,r3 1860 mtspr SPRN_SRR0,r3
diff --git a/arch/powerpc/kernel/ioctl32.c b/arch/powerpc/kernel/ioctl32.c
new file mode 100644
index 000000000000..3fa6a93adbd0
--- /dev/null
+++ b/arch/powerpc/kernel/ioctl32.c
@@ -0,0 +1,49 @@
1/*
2 * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
3 *
4 * Based on sparc64 ioctl32.c by:
5 *
6 * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
7 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
8 *
9 * ppc64 changes:
10 *
11 * Copyright (C) 2000 Ken Aaker (kdaaker@rchland.vnet.ibm.com)
12 * Copyright (C) 2001 Anton Blanchard (antonb@au.ibm.com)
13 *
14 * These routines maintain argument size conversion between 32bit and 64bit
15 * ioctls.
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 */
22
23#define INCLUDES
24#include "compat_ioctl.c"
25#include <linux/syscalls.h>
26
27#define CODE
28#include "compat_ioctl.c"
29
30#define HANDLE_IOCTL(cmd,handler) { cmd, (ioctl_trans_handler_t)handler, NULL },
31#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd,sys_ioctl)
32
33#define IOCTL_TABLE_START \
34 struct ioctl_trans ioctl_start[] = {
35#define IOCTL_TABLE_END \
36 };
37
38IOCTL_TABLE_START
39#include <linux/compat_ioctl.h>
40#define DECLARES
41#include "compat_ioctl.c"
42
43/* Little p (/dev/rtc, /dev/envctrl, etc.) */
44COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
45COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
46
47IOCTL_TABLE_END
48
49int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
new file mode 100644
index 000000000000..4b7940693f3d
--- /dev/null
+++ b/arch/powerpc/kernel/irq.c
@@ -0,0 +1,478 @@
1/*
2 * arch/ppc/kernel/irq.c
3 *
4 * Derived from arch/i386/kernel/irq.c
5 * Copyright (C) 1992 Linus Torvalds
6 * Adapted from arch/i386 by Gary Thomas
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
9 * Copyright (C) 1996-2001 Cort Dougan
10 * Adapted for Power Macintosh by Paul Mackerras
11 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
12 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 * This file contains the code used by various IRQ handling routines:
20 * asking for different IRQ's should be done through these routines
21 * instead of just grabbing them. Thus setups with different IRQ numbers
22 * shouldn't result in any weird surprises, and installing new handlers
23 * should be easier.
24 *
25 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
26 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
27 * mask register (of which only 16 are defined), hence the weird shifting
28 * and complement of the cached_irq_mask. I want to be able to stuff
29 * this right into the SIU SMASK register.
30 * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx
31 * to reduce code space and undefined function references.
32 */
33
34#include <linux/errno.h>
35#include <linux/module.h>
36#include <linux/threads.h>
37#include <linux/kernel_stat.h>
38#include <linux/signal.h>
39#include <linux/sched.h>
40#include <linux/ptrace.h>
41#include <linux/ioport.h>
42#include <linux/interrupt.h>
43#include <linux/timex.h>
44#include <linux/config.h>
45#include <linux/init.h>
46#include <linux/slab.h>
47#include <linux/pci.h>
48#include <linux/delay.h>
49#include <linux/irq.h>
50#include <linux/proc_fs.h>
51#include <linux/random.h>
52#include <linux/seq_file.h>
53#include <linux/cpumask.h>
54#include <linux/profile.h>
55#include <linux/bitops.h>
56#ifdef CONFIG_PPC64
57#include <linux/kallsyms.h>
58#endif
59
60#include <asm/uaccess.h>
61#include <asm/system.h>
62#include <asm/io.h>
63#include <asm/pgtable.h>
64#include <asm/irq.h>
65#include <asm/cache.h>
66#include <asm/prom.h>
67#include <asm/ptrace.h>
68#include <asm/machdep.h>
69#ifdef CONFIG_PPC64
70#include <asm/iseries/it_lp_queue.h>
71#include <asm/paca.h>
72#endif
73
74static int ppc_spurious_interrupts;
75
76#if defined(CONFIG_PPC_ISERIES) && defined(CONFIG_SMP)
77extern void iSeries_smp_message_recv(struct pt_regs *);
78#endif
79
80#ifdef CONFIG_PPC32
81#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
82
83unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
84atomic_t ppc_n_lost_interrupts;
85
86#ifdef CONFIG_TAU_INT
87extern int tau_initialized;
88extern int tau_interrupts(int);
89#endif
90
91#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
92extern atomic_t ipi_recv;
93extern atomic_t ipi_sent;
94#endif
95#endif /* CONFIG_PPC32 */
96
97#ifdef CONFIG_PPC64
98EXPORT_SYMBOL(irq_desc);
99
100int distribute_irqs = 1;
101int __irq_offset_value;
102u64 ppc64_interrupt_controller;
103#endif /* CONFIG_PPC64 */
104
105int show_interrupts(struct seq_file *p, void *v)
106{
107 int i = *(loff_t *)v, j;
108 struct irqaction *action;
109 irq_desc_t *desc;
110 unsigned long flags;
111
112 if (i == 0) {
113 seq_puts(p, " ");
114 for_each_online_cpu(j)
115 seq_printf(p, "CPU%d ", j);
116 seq_putc(p, '\n');
117 }
118
119 if (i < NR_IRQS) {
120 desc = get_irq_desc(i);
121 spin_lock_irqsave(&desc->lock, flags);
122 action = desc->action;
123 if (!action || !action->handler)
124 goto skip;
125 seq_printf(p, "%3d: ", i);
126#ifdef CONFIG_SMP
127 for_each_online_cpu(j)
128 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
129#else
130 seq_printf(p, "%10u ", kstat_irqs(i));
131#endif /* CONFIG_SMP */
132 if (desc->handler)
133 seq_printf(p, " %s ", desc->handler->typename);
134 else
135 seq_puts(p, " None ");
136 seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
137 seq_printf(p, " %s", action->name);
138 for (action = action->next; action; action = action->next)
139 seq_printf(p, ", %s", action->name);
140 seq_putc(p, '\n');
141skip:
142 spin_unlock_irqrestore(&desc->lock, flags);
143 } else if (i == NR_IRQS) {
144#ifdef CONFIG_PPC32
145#ifdef CONFIG_TAU_INT
146 if (tau_initialized){
147 seq_puts(p, "TAU: ");
148 for (j = 0; j < NR_CPUS; j++)
149 if (cpu_online(j))
150 seq_printf(p, "%10u ", tau_interrupts(j));
151 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
152 }
153#endif
154#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
155 /* should this be per processor send/receive? */
156 seq_printf(p, "IPI (recv/sent): %10u/%u\n",
157 atomic_read(&ipi_recv), atomic_read(&ipi_sent));
158#endif
159#endif /* CONFIG_PPC32 */
160 seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
161 }
162 return 0;
163}
164
165#ifdef CONFIG_HOTPLUG_CPU
166void fixup_irqs(cpumask_t map)
167{
168 unsigned int irq;
169 static int warned;
170
171 for_each_irq(irq) {
172 cpumask_t mask;
173
174 if (irq_desc[irq].status & IRQ_PER_CPU)
175 continue;
176
177 cpus_and(mask, irq_affinity[irq], map);
178 if (any_online_cpu(mask) == NR_CPUS) {
179 printk("Breaking affinity for irq %i\n", irq);
180 mask = map;
181 }
182 if (irq_desc[irq].handler->set_affinity)
183 irq_desc[irq].handler->set_affinity(irq, mask);
184 else if (irq_desc[irq].action && !(warned++))
185 printk("Cannot set affinity for irq %i\n", irq);
186 }
187
188 local_irq_enable();
189 mdelay(1);
190 local_irq_disable();
191}
192#endif
193
194#ifdef CONFIG_PPC_ISERIES
195void do_IRQ(struct pt_regs *regs)
196{
197 struct paca_struct *lpaca;
198
199 irq_enter();
200
201#ifdef CONFIG_DEBUG_STACKOVERFLOW
202 /* Debugging check for stack overflow: is there less than 2KB free? */
203 {
204 long sp;
205
206 sp = __get_SP() & (THREAD_SIZE-1);
207
208 if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
209 printk("do_IRQ: stack overflow: %ld\n",
210 sp - sizeof(struct thread_info));
211 dump_stack();
212 }
213 }
214#endif
215
216 lpaca = get_paca();
217#ifdef CONFIG_SMP
218 if (lpaca->lppaca.int_dword.fields.ipi_cnt) {
219 lpaca->lppaca.int_dword.fields.ipi_cnt = 0;
220 iSeries_smp_message_recv(regs);
221 }
222#endif /* CONFIG_SMP */
223 if (hvlpevent_is_pending())
224 process_hvlpevents(regs);
225
226 irq_exit();
227
228 if (lpaca->lppaca.int_dword.fields.decr_int) {
229 lpaca->lppaca.int_dword.fields.decr_int = 0;
230 /* Signal a fake decrementer interrupt */
231 timer_interrupt(regs);
232 }
233}
234
235#else /* CONFIG_PPC_ISERIES */
236
237void do_IRQ(struct pt_regs *regs)
238{
239 int irq;
240#ifdef CONFIG_IRQSTACKS
241 struct thread_info *curtp, *irqtp;
242#endif
243
244 irq_enter();
245
246#ifdef CONFIG_DEBUG_STACKOVERFLOW
247 /* Debugging check for stack overflow: is there less than 2KB free? */
248 {
249 long sp;
250
251 sp = __get_SP() & (THREAD_SIZE-1);
252
253 if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
254 printk("do_IRQ: stack overflow: %ld\n",
255 sp - sizeof(struct thread_info));
256 dump_stack();
257 }
258 }
259#endif
260
261 /*
262 * Every platform is required to implement ppc_md.get_irq.
263 * This function will either return an irq number or -1 to
264 * indicate there are no more pending.
265 * The value -2 is for buggy hardware and means that this IRQ
266 * has already been handled. -- Tom
267 */
268 irq = ppc_md.get_irq(regs);
269
270 if (irq >= 0) {
271#ifdef CONFIG_IRQSTACKS
272 /* Switch to the irq stack to handle this */
273 curtp = current_thread_info();
274 irqtp = hardirq_ctx[smp_processor_id()];
275 if (curtp != irqtp) {
276 irqtp->task = curtp->task;
277 irqtp->flags = 0;
278 call___do_IRQ(irq, regs, irqtp);
279 irqtp->task = NULL;
280 if (irqtp->flags)
281 set_bits(irqtp->flags, &curtp->flags);
282 } else
283#endif
284 __do_IRQ(irq, regs);
285 } else
286#ifdef CONFIG_PPC32
287 if (irq != -2)
288#endif
289 /* That's not SMP safe ... but who cares ? */
290 ppc_spurious_interrupts++;
291 irq_exit();
292}
293
294#endif /* CONFIG_PPC_ISERIES */
295
296void __init init_IRQ(void)
297{
298#ifdef CONFIG_PPC64
299 static int once = 0;
300
301 if (once)
302 return;
303
304 once++;
305
306#endif
307 ppc_md.init_IRQ();
308#ifdef CONFIG_PPC64
309 irq_ctx_init();
310#endif
311}
312
313#ifdef CONFIG_PPC64
314#ifndef CONFIG_PPC_ISERIES
315/*
316 * Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
317 */
318
319#define UNDEFINED_IRQ 0xffffffff
320unsigned int virt_irq_to_real_map[NR_IRQS];
321
322/*
323 * Don't use virtual irqs 0, 1, 2 for devices.
324 * The pcnet32 driver considers interrupt numbers < 2 to be invalid,
325 * and 2 is the XICS IPI interrupt.
326 * We limit virtual irqs to 17 less than NR_IRQS so that when we
327 * offset them by 16 (to reserve the first 16 for ISA interrupts)
328 * we don't end up with an interrupt number >= NR_IRQS.
329 */
330#define MIN_VIRT_IRQ 3
331#define MAX_VIRT_IRQ (NR_IRQS - NUM_ISA_INTERRUPTS - 1)
332#define NR_VIRT_IRQS (MAX_VIRT_IRQ - MIN_VIRT_IRQ + 1)
333
334void
335virt_irq_init(void)
336{
337 int i;
338 for (i = 0; i < NR_IRQS; i++)
339 virt_irq_to_real_map[i] = UNDEFINED_IRQ;
340}
341
342/* Create a mapping for a real_irq if it doesn't already exist.
343 * Return the virtual irq as a convenience.
344 */
345int virt_irq_create_mapping(unsigned int real_irq)
346{
347 unsigned int virq, first_virq;
348 static int warned;
349
350 if (ppc64_interrupt_controller == IC_OPEN_PIC)
351 return real_irq; /* no mapping for openpic (for now) */
352
353 if (ppc64_interrupt_controller == IC_CELL_PIC)
354 return real_irq; /* no mapping for iic either */
355
356 /* don't map interrupts < MIN_VIRT_IRQ */
357 if (real_irq < MIN_VIRT_IRQ) {
358 virt_irq_to_real_map[real_irq] = real_irq;
359 return real_irq;
360 }
361
362 /* map to a number between MIN_VIRT_IRQ and MAX_VIRT_IRQ */
363 virq = real_irq;
364 if (virq > MAX_VIRT_IRQ)
365 virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;
366
367 /* search for this number or a free slot */
368 first_virq = virq;
369 while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
370 if (virt_irq_to_real_map[virq] == real_irq)
371 return virq;
372 if (++virq > MAX_VIRT_IRQ)
373 virq = MIN_VIRT_IRQ;
374 if (virq == first_virq)
375 goto nospace; /* oops, no free slots */
376 }
377
378 virt_irq_to_real_map[virq] = real_irq;
379 return virq;
380
381 nospace:
382 if (!warned) {
383 printk(KERN_CRIT "Interrupt table is full\n");
384 printk(KERN_CRIT "Increase NR_IRQS (currently %d) "
385 "in your kernel sources and rebuild.\n", NR_IRQS);
386 warned = 1;
387 }
388 return NO_IRQ;
389}
390
391/*
392 * In most cases will get a hit on the very first slot checked in the
393 * virt_irq_to_real_map. Only when there are a large number of
394 * IRQs will this be expensive.
395 */
396unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
397{
398 unsigned int virq;
399 unsigned int first_virq;
400
401 virq = real_irq;
402
403 if (virq > MAX_VIRT_IRQ)
404 virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;
405
406 first_virq = virq;
407
408 do {
409 if (virt_irq_to_real_map[virq] == real_irq)
410 return virq;
411
412 virq++;
413
414 if (virq >= MAX_VIRT_IRQ)
415 virq = 0;
416
417 } while (first_virq != virq);
418
419 return NO_IRQ;
420
421}
422
423#endif /* CONFIG_PPC_ISERIES */
424
425#ifdef CONFIG_IRQSTACKS
426struct thread_info *softirq_ctx[NR_CPUS];
427struct thread_info *hardirq_ctx[NR_CPUS];
428
429void irq_ctx_init(void)
430{
431 struct thread_info *tp;
432 int i;
433
434 for_each_cpu(i) {
435 memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
436 tp = softirq_ctx[i];
437 tp->cpu = i;
438 tp->preempt_count = SOFTIRQ_OFFSET;
439
440 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
441 tp = hardirq_ctx[i];
442 tp->cpu = i;
443 tp->preempt_count = HARDIRQ_OFFSET;
444 }
445}
446
447void do_softirq(void)
448{
449 unsigned long flags;
450 struct thread_info *curtp, *irqtp;
451
452 if (in_interrupt())
453 return;
454
455 local_irq_save(flags);
456
457 if (local_softirq_pending()) {
458 curtp = current_thread_info();
459 irqtp = softirq_ctx[smp_processor_id()];
460 irqtp->task = curtp->task;
461 call_do_softirq(irqtp);
462 irqtp->task = NULL;
463 }
464
465 local_irq_restore(flags);
466}
467EXPORT_SYMBOL(do_softirq);
468
469#endif /* CONFIG_IRQSTACKS */
470
471static int __init setup_noirqdistrib(char *str)
472{
473 distribute_irqs = 0;
474 return 1;
475}
476
477__setup("noirqdistrib", setup_noirqdistrib);
478#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
new file mode 100644
index 000000000000..5e954fae031f
--- /dev/null
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -0,0 +1,612 @@
1/*
2 * PowerPC64 LPAR Configuration Information Driver
3 *
4 * Dave Engebretsen engebret@us.ibm.com
5 * Copyright (c) 2003 Dave Engebretsen
6 * Will Schmidt willschm@us.ibm.com
7 * SPLPAR updates, Copyright (c) 2003 Will Schmidt IBM Corporation.
8 * seq_file updates, Copyright (c) 2004 Will Schmidt IBM Corporation.
9 * Nathan Lynch nathanl@austin.ibm.com
10 * Added lparcfg_write, Copyright (C) 2004 Nathan Lynch IBM Corporation.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 * This driver creates a proc file at /proc/ppc64/lparcfg which contains
18 * keyword - value pairs that specify the configuration of the partition.
19 */
20
21#include <linux/config.h>
22#include <linux/module.h>
23#include <linux/types.h>
24#include <linux/errno.h>
25#include <linux/proc_fs.h>
26#include <linux/init.h>
27#include <linux/seq_file.h>
28#include <asm/uaccess.h>
29#include <asm/iseries/hv_lp_config.h>
30#include <asm/lppaca.h>
31#include <asm/hvcall.h>
32#include <asm/firmware.h>
33#include <asm/rtas.h>
34#include <asm/system.h>
35#include <asm/time.h>
36#include <asm/iseries/it_exp_vpd_panel.h>
37#include <asm/prom.h>
38#include <asm/systemcfg.h>
39
40#define MODULE_VERS "1.6"
41#define MODULE_NAME "lparcfg"
42
43/* #define LPARCFG_DEBUG */
44
45/* find a better place for this function... */
46void log_plpar_hcall_return(unsigned long rc, char *tag)
47{
48 if (rc == 0) /* success, return */
49 return;
50/* check for null tag ? */
51 if (rc == H_Hardware)
52 printk(KERN_INFO
53 "plpar-hcall (%s) failed with hardware fault\n", tag);
54 else if (rc == H_Function)
55 printk(KERN_INFO
56 "plpar-hcall (%s) failed; function not allowed\n", tag);
57 else if (rc == H_Authority)
58 printk(KERN_INFO
59 "plpar-hcall (%s) failed; not authorized to this function\n",
60 tag);
61 else if (rc == H_Parameter)
62 printk(KERN_INFO "plpar-hcall (%s) failed; Bad parameter(s)\n",
63 tag);
64 else
65 printk(KERN_INFO
66 "plpar-hcall (%s) failed with unexpected rc(0x%lx)\n",
67 tag, rc);
68
69}
70
71static struct proc_dir_entry *proc_ppc64_lparcfg;
72#define LPARCFG_BUFF_SIZE 4096
73
74#ifdef CONFIG_PPC_ISERIES
75
76/*
77 * For iSeries legacy systems, the PPA purr function is available from the
78 * emulated_time_base field in the paca.
79 */
80static unsigned long get_purr(void)
81{
82 unsigned long sum_purr = 0;
83 int cpu;
84 struct paca_struct *lpaca;
85
86 for_each_cpu(cpu) {
87 lpaca = paca + cpu;
88 sum_purr += lpaca->lppaca.emulated_time_base;
89
90#ifdef PURR_DEBUG
91 printk(KERN_INFO "get_purr for cpu (%d) has value (%ld) \n",
92 cpu, lpaca->lppaca.emulated_time_base);
93#endif
94 }
95 return sum_purr;
96}
97
98#define lparcfg_write NULL
99
100/*
101 * Methods used to fetch LPAR data when running on an iSeries platform.
102 */
103static int lparcfg_data(struct seq_file *m, void *v)
104{
105 unsigned long pool_id, lp_index;
106 int shared, entitled_capacity, max_entitled_capacity;
107 int processors, max_processors;
108 struct paca_struct *lpaca = get_paca();
109 unsigned long purr = get_purr();
110
111 seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS);
112
113 shared = (int)(lpaca->lppaca_ptr->shared_proc);
114 seq_printf(m, "serial_number=%c%c%c%c%c%c%c\n",
115 e2a(xItExtVpdPanel.mfgID[2]),
116 e2a(xItExtVpdPanel.mfgID[3]),
117 e2a(xItExtVpdPanel.systemSerial[1]),
118 e2a(xItExtVpdPanel.systemSerial[2]),
119 e2a(xItExtVpdPanel.systemSerial[3]),
120 e2a(xItExtVpdPanel.systemSerial[4]),
121 e2a(xItExtVpdPanel.systemSerial[5]));
122
123 seq_printf(m, "system_type=%c%c%c%c\n",
124 e2a(xItExtVpdPanel.machineType[0]),
125 e2a(xItExtVpdPanel.machineType[1]),
126 e2a(xItExtVpdPanel.machineType[2]),
127 e2a(xItExtVpdPanel.machineType[3]));
128
129 lp_index = HvLpConfig_getLpIndex();
130 seq_printf(m, "partition_id=%d\n", (int)lp_index);
131
132 seq_printf(m, "system_active_processors=%d\n",
133 (int)HvLpConfig_getSystemPhysicalProcessors());
134
135 seq_printf(m, "system_potential_processors=%d\n",
136 (int)HvLpConfig_getSystemPhysicalProcessors());
137
138 processors = (int)HvLpConfig_getPhysicalProcessors();
139 seq_printf(m, "partition_active_processors=%d\n", processors);
140
141 max_processors = (int)HvLpConfig_getMaxPhysicalProcessors();
142 seq_printf(m, "partition_potential_processors=%d\n", max_processors);
143
144 if (shared) {
145 entitled_capacity = HvLpConfig_getSharedProcUnits();
146 max_entitled_capacity = HvLpConfig_getMaxSharedProcUnits();
147 } else {
148 entitled_capacity = processors * 100;
149 max_entitled_capacity = max_processors * 100;
150 }
151 seq_printf(m, "partition_entitled_capacity=%d\n", entitled_capacity);
152
153 seq_printf(m, "partition_max_entitled_capacity=%d\n",
154 max_entitled_capacity);
155
156 if (shared) {
157 pool_id = HvLpConfig_getSharedPoolIndex();
158 seq_printf(m, "pool=%d\n", (int)pool_id);
159 seq_printf(m, "pool_capacity=%d\n",
160 (int)(HvLpConfig_getNumProcsInSharedPool(pool_id) *
161 100));
162 seq_printf(m, "purr=%ld\n", purr);
163 }
164
165 seq_printf(m, "shared_processor_mode=%d\n", shared);
166
167 return 0;
168}
169#endif /* CONFIG_PPC_ISERIES */
170
171#ifdef CONFIG_PPC_PSERIES
172/*
173 * Methods used to fetch LPAR data when running on a pSeries platform.
174 */
175
176/*
177 * H_GET_PPP hcall returns info in 4 parms.
178 * entitled_capacity,unallocated_capacity,
179 * aggregation, resource_capability).
180 *
181 * R4 = Entitled Processor Capacity Percentage.
182 * R5 = Unallocated Processor Capacity Percentage.
183 * R6 (AABBCCDDEEFFGGHH).
184 * XXXX - reserved (0)
185 * XXXX - reserved (0)
186 * XXXX - Group Number
187 * XXXX - Pool Number.
188 * R7 (IIJJKKLLMMNNOOPP).
189 * XX - reserved. (0)
190 * XX - bit 0-6 reserved (0). bit 7 is Capped indicator.
191 * XX - variable processor Capacity Weight
192 * XX - Unallocated Variable Processor Capacity Weight.
193 * XXXX - Active processors in Physical Processor Pool.
194 * XXXX - Processors active on platform.
195 */
196static unsigned int h_get_ppp(unsigned long *entitled,
197 unsigned long *unallocated,
198 unsigned long *aggregation,
199 unsigned long *resource)
200{
201 unsigned long rc;
202 rc = plpar_hcall_4out(H_GET_PPP, 0, 0, 0, 0, entitled, unallocated,
203 aggregation, resource);
204
205 log_plpar_hcall_return(rc, "H_GET_PPP");
206
207 return rc;
208}
209
210static void h_pic(unsigned long *pool_idle_time, unsigned long *num_procs)
211{
212 unsigned long rc;
213 unsigned long dummy;
214 rc = plpar_hcall(H_PIC, 0, 0, 0, 0, pool_idle_time, num_procs, &dummy);
215
216 log_plpar_hcall_return(rc, "H_PIC");
217}
218
219static unsigned long get_purr(void);
220
221/* Track sum of all purrs across all processors. This is used to further */
222/* calculate usage values by different applications */
223
224static unsigned long get_purr(void)
225{
226 unsigned long sum_purr = 0;
227 int cpu;
228 struct cpu_usage *cu;
229
230 for_each_cpu(cpu) {
231 cu = &per_cpu(cpu_usage_array, cpu);
232 sum_purr += cu->current_tb;
233 }
234 return sum_purr;
235}
236
237#define SPLPAR_CHARACTERISTICS_TOKEN 20
238#define SPLPAR_MAXLENGTH 1026*(sizeof(char))
239
240/*
241 * parse_system_parameter_string()
242 * Retrieve the potential_processors, max_entitled_capacity and friends
243 * through the get-system-parameter rtas call. Replace keyword strings as
244 * necessary.
245 */
246static void parse_system_parameter_string(struct seq_file *m)
247{
248 int call_status;
249
250 char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
251 if (!local_buffer) {
252 printk(KERN_ERR "%s %s kmalloc failure at line %d \n",
253 __FILE__, __FUNCTION__, __LINE__);
254 return;
255 }
256
257 spin_lock(&rtas_data_buf_lock);
258 memset(rtas_data_buf, 0, SPLPAR_MAXLENGTH);
259 call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
260 NULL,
261 SPLPAR_CHARACTERISTICS_TOKEN,
262 __pa(rtas_data_buf));
263 memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
264 spin_unlock(&rtas_data_buf_lock);
265
266 if (call_status != 0) {
267 printk(KERN_INFO
268 "%s %s Error calling get-system-parameter (0x%x)\n",
269 __FILE__, __FUNCTION__, call_status);
270 } else {
271 int splpar_strlen;
272 int idx, w_idx;
273 char *workbuffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
274 if (!workbuffer) {
275 printk(KERN_ERR "%s %s kmalloc failure at line %d \n",
276 __FILE__, __FUNCTION__, __LINE__);
277 kfree(local_buffer);
278 return;
279 }
280#ifdef LPARCFG_DEBUG
281 printk(KERN_INFO "success calling get-system-parameter \n");
282#endif
283 splpar_strlen = local_buffer[0] * 16 + local_buffer[1];
284 local_buffer += 2; /* step over strlen value */
285
286 memset(workbuffer, 0, SPLPAR_MAXLENGTH);
287 w_idx = 0;
288 idx = 0;
289 while ((*local_buffer) && (idx < splpar_strlen)) {
290 workbuffer[w_idx++] = local_buffer[idx++];
291 if ((local_buffer[idx] == ',')
292 || (local_buffer[idx] == '\0')) {
293 workbuffer[w_idx] = '\0';
294 if (w_idx) {
295 /* avoid the empty string */
296 seq_printf(m, "%s\n", workbuffer);
297 }
298 memset(workbuffer, 0, SPLPAR_MAXLENGTH);
299 idx++; /* skip the comma */
300 w_idx = 0;
301 } else if (local_buffer[idx] == '=') {
302 /* code here to replace workbuffer contents
303 with different keyword strings */
304 if (0 == strcmp(workbuffer, "MaxEntCap")) {
305 strcpy(workbuffer,
306 "partition_max_entitled_capacity");
307 w_idx = strlen(workbuffer);
308 }
309 if (0 == strcmp(workbuffer, "MaxPlatProcs")) {
310 strcpy(workbuffer,
311 "system_potential_processors");
312 w_idx = strlen(workbuffer);
313 }
314 }
315 }
316 kfree(workbuffer);
317 local_buffer -= 2; /* back up over strlen value */
318 }
319 kfree(local_buffer);
320}
321
322static int lparcfg_count_active_processors(void);
323
324/* Return the number of processors in the system.
325 * This function reads through the device tree and counts
326 * the virtual processors, this does not include threads.
327 */
328static int lparcfg_count_active_processors(void)
329{
330 struct device_node *cpus_dn = NULL;
331 int count = 0;
332
333 while ((cpus_dn = of_find_node_by_type(cpus_dn, "cpu"))) {
334#ifdef LPARCFG_DEBUG
335 printk(KERN_ERR "cpus_dn %p \n", cpus_dn);
336#endif
337 count++;
338 }
339 return count;
340}
341
342static int lparcfg_data(struct seq_file *m, void *v)
343{
344 int partition_potential_processors;
345 int partition_active_processors;
346 struct device_node *rootdn;
347 const char *model = "";
348 const char *system_id = "";
349 unsigned int *lp_index_ptr, lp_index = 0;
350 struct device_node *rtas_node;
351 int *lrdrp;
352
353 rootdn = find_path_device("/");
354 if (rootdn) {
355 model = get_property(rootdn, "model", NULL);
356 system_id = get_property(rootdn, "system-id", NULL);
357 lp_index_ptr = (unsigned int *)
358 get_property(rootdn, "ibm,partition-no", NULL);
359 if (lp_index_ptr)
360 lp_index = *lp_index_ptr;
361 }
362
363 seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS);
364
365 seq_printf(m, "serial_number=%s\n", system_id);
366
367 seq_printf(m, "system_type=%s\n", model);
368
369 seq_printf(m, "partition_id=%d\n", (int)lp_index);
370
371 rtas_node = find_path_device("/rtas");
372 lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", NULL);
373
374 if (lrdrp == NULL) {
375 partition_potential_processors = _systemcfg->processorCount;
376 } else {
377 partition_potential_processors = *(lrdrp + 4);
378 }
379
380 partition_active_processors = lparcfg_count_active_processors();
381
382 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
383 unsigned long h_entitled, h_unallocated;
384 unsigned long h_aggregation, h_resource;
385 unsigned long pool_idle_time, pool_procs;
386 unsigned long purr;
387
388 h_get_ppp(&h_entitled, &h_unallocated, &h_aggregation,
389 &h_resource);
390
391 seq_printf(m, "R4=0x%lx\n", h_entitled);
392 seq_printf(m, "R5=0x%lx\n", h_unallocated);
393 seq_printf(m, "R6=0x%lx\n", h_aggregation);
394 seq_printf(m, "R7=0x%lx\n", h_resource);
395
396 purr = get_purr();
397
398 /* this call handles the ibm,get-system-parameter contents */
399 parse_system_parameter_string(m);
400
401 seq_printf(m, "partition_entitled_capacity=%ld\n", h_entitled);
402
403 seq_printf(m, "group=%ld\n", (h_aggregation >> 2 * 8) & 0xffff);
404
405 seq_printf(m, "system_active_processors=%ld\n",
406 (h_resource >> 0 * 8) & 0xffff);
407
408 /* pool related entries are apropriate for shared configs */
409 if (paca[0].lppaca.shared_proc) {
410
411 h_pic(&pool_idle_time, &pool_procs);
412
413 seq_printf(m, "pool=%ld\n",
414 (h_aggregation >> 0 * 8) & 0xffff);
415
416 /* report pool_capacity in percentage */
417 seq_printf(m, "pool_capacity=%ld\n",
418 ((h_resource >> 2 * 8) & 0xffff) * 100);
419
420 seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);
421
422 seq_printf(m, "pool_num_procs=%ld\n", pool_procs);
423 }
424
425 seq_printf(m, "unallocated_capacity_weight=%ld\n",
426 (h_resource >> 4 * 8) & 0xFF);
427
428 seq_printf(m, "capacity_weight=%ld\n",
429 (h_resource >> 5 * 8) & 0xFF);
430
431 seq_printf(m, "capped=%ld\n", (h_resource >> 6 * 8) & 0x01);
432
433 seq_printf(m, "unallocated_capacity=%ld\n", h_unallocated);
434
435 seq_printf(m, "purr=%ld\n", purr);
436
437 } else { /* non SPLPAR case */
438
439 seq_printf(m, "system_active_processors=%d\n",
440 partition_potential_processors);
441
442 seq_printf(m, "system_potential_processors=%d\n",
443 partition_potential_processors);
444
445 seq_printf(m, "partition_max_entitled_capacity=%d\n",
446 partition_potential_processors * 100);
447
448 seq_printf(m, "partition_entitled_capacity=%d\n",
449 partition_active_processors * 100);
450 }
451
452 seq_printf(m, "partition_active_processors=%d\n",
453 partition_active_processors);
454
455 seq_printf(m, "partition_potential_processors=%d\n",
456 partition_potential_processors);
457
458 seq_printf(m, "shared_processor_mode=%d\n", paca[0].lppaca.shared_proc);
459
460 return 0;
461}
462
463/*
464 * Interface for changing system parameters (variable capacity weight
465 * and entitled capacity). Format of input is "param_name=value";
466 * anything after value is ignored. Valid parameters at this time are
467 * "partition_entitled_capacity" and "capacity_weight". We use
468 * H_SET_PPP to alter parameters.
469 *
470 * This function should be invoked only on systems with
471 * FW_FEATURE_SPLPAR.
472 */
473static ssize_t lparcfg_write(struct file *file, const char __user * buf,
474 size_t count, loff_t * off)
475{
476 char *kbuf;
477 char *tmp;
478 u64 new_entitled, *new_entitled_ptr = &new_entitled;
479 u8 new_weight, *new_weight_ptr = &new_weight;
480
481 unsigned long current_entitled; /* parameters for h_get_ppp */
482 unsigned long dummy;
483 unsigned long resource;
484 u8 current_weight;
485
486 ssize_t retval = -ENOMEM;
487
488 kbuf = kmalloc(count, GFP_KERNEL);
489 if (!kbuf)
490 goto out;
491
492 retval = -EFAULT;
493 if (copy_from_user(kbuf, buf, count))
494 goto out;
495
496 retval = -EINVAL;
497 kbuf[count - 1] = '\0';
498 tmp = strchr(kbuf, '=');
499 if (!tmp)
500 goto out;
501
502 *tmp++ = '\0';
503
504 if (!strcmp(kbuf, "partition_entitled_capacity")) {
505 char *endp;
506 *new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
507 if (endp == tmp)
508 goto out;
509 new_weight_ptr = &current_weight;
510 } else if (!strcmp(kbuf, "capacity_weight")) {
511 char *endp;
512 *new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
513 if (endp == tmp)
514 goto out;
515 new_entitled_ptr = &current_entitled;
516 } else
517 goto out;
518
519 /* Get our current parameters */
520 retval = h_get_ppp(&current_entitled, &dummy, &dummy, &resource);
521 if (retval) {
522 retval = -EIO;
523 goto out;
524 }
525
526 current_weight = (resource >> 5 * 8) & 0xFF;
527
528 pr_debug("%s: current_entitled = %lu, current_weight = %lu\n",
529 __FUNCTION__, current_entitled, current_weight);
530
531 pr_debug("%s: new_entitled = %lu, new_weight = %lu\n",
532 __FUNCTION__, *new_entitled_ptr, *new_weight_ptr);
533
534 retval = plpar_hcall_norets(H_SET_PPP, *new_entitled_ptr,
535 *new_weight_ptr);
536
537 if (retval == H_Success || retval == H_Constrained) {
538 retval = count;
539 } else if (retval == H_Busy) {
540 retval = -EBUSY;
541 } else if (retval == H_Hardware) {
542 retval = -EIO;
543 } else if (retval == H_Parameter) {
544 retval = -EINVAL;
545 } else {
546 printk(KERN_WARNING "%s: received unknown hv return code %ld",
547 __FUNCTION__, retval);
548 retval = -EIO;
549 }
550
551 out:
552 kfree(kbuf);
553 return retval;
554}
555
556#endif /* CONFIG_PPC_PSERIES */
557
558static int lparcfg_open(struct inode *inode, struct file *file)
559{
560 return single_open(file, lparcfg_data, NULL);
561}
562
563struct file_operations lparcfg_fops = {
564 .owner = THIS_MODULE,
565 .read = seq_read,
566 .open = lparcfg_open,
567 .release = single_release,
568};
569
570int __init lparcfg_init(void)
571{
572 struct proc_dir_entry *ent;
573 mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
574
575 /* Allow writing if we have FW_FEATURE_SPLPAR */
576 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
577 lparcfg_fops.write = lparcfg_write;
578 mode |= S_IWUSR;
579 }
580
581 ent = create_proc_entry("ppc64/lparcfg", mode, NULL);
582 if (ent) {
583 ent->proc_fops = &lparcfg_fops;
584 ent->data = kmalloc(LPARCFG_BUFF_SIZE, GFP_KERNEL);
585 if (!ent->data) {
586 printk(KERN_ERR
587 "Failed to allocate buffer for lparcfg\n");
588 remove_proc_entry("lparcfg", ent->parent);
589 return -ENOMEM;
590 }
591 } else {
592 printk(KERN_ERR "Failed to create ppc64/lparcfg\n");
593 return -EIO;
594 }
595
596 proc_ppc64_lparcfg = ent;
597 return 0;
598}
599
600void __exit lparcfg_cleanup(void)
601{
602 if (proc_ppc64_lparcfg) {
603 kfree(proc_ppc64_lparcfg->data);
604 remove_proc_entry("lparcfg", proc_ppc64_lparcfg->parent);
605 }
606}
607
608module_init(lparcfg_init);
609module_exit(lparcfg_cleanup);
610MODULE_DESCRIPTION("Interface for LPAR configuration data");
611MODULE_AUTHOR("Dave Engebretsen");
612MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 3bedb532aed9..f6d84a75ed26 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -519,7 +519,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
519 * 519 *
520 * flush_icache_range(unsigned long start, unsigned long stop) 520 * flush_icache_range(unsigned long start, unsigned long stop)
521 */ 521 */
522_GLOBAL(flush_icache_range) 522_GLOBAL(__flush_icache_range)
523BEGIN_FTR_SECTION 523BEGIN_FTR_SECTION
524 blr /* for 601, do nothing */ 524 blr /* for 601, do nothing */
525END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) 525END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
@@ -607,27 +607,6 @@ _GLOBAL(invalidate_dcache_range)
607 sync /* wait for dcbi's to get to ram */ 607 sync /* wait for dcbi's to get to ram */
608 blr 608 blr
609 609
610#ifdef CONFIG_NOT_COHERENT_CACHE
611/*
612 * 40x cores have 8K or 16K dcache and 32 byte line size.
613 * 44x has a 32K dcache and 32 byte line size.
614 * 8xx has 1, 2, 4, 8K variants.
615 * For now, cover the worst case of the 44x.
616 * Must be called with external interrupts disabled.
617 */
618#define CACHE_NWAYS 64
619#define CACHE_NLINES 16
620
621_GLOBAL(flush_dcache_all)
622 li r4, (2 * CACHE_NWAYS * CACHE_NLINES)
623 mtctr r4
624 lis r5, KERNELBASE@h
6251: lwz r3, 0(r5) /* Load one word from every line */
626 addi r5, r5, L1_CACHE_BYTES
627 bdnz 1b
628 blr
629#endif /* CONFIG_NOT_COHERENT_CACHE */
630
631/* 610/*
632 * Flush a particular page from the data cache to RAM. 611 * Flush a particular page from the data cache to RAM.
633 * Note: this is necessary because the instruction cache does *not* 612 * Note: this is necessary because the instruction cache does *not*
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index ae1433da09b2..ae48a002f81a 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -89,12 +89,12 @@ _GLOBAL(call_do_softirq)
89 mtlr r0 89 mtlr r0
90 blr 90 blr
91 91
92_GLOBAL(call_handle_IRQ_event) 92_GLOBAL(call___do_IRQ)
93 mflr r0 93 mflr r0
94 std r0,16(r1) 94 std r0,16(r1)
95 stdu r1,THREAD_SIZE-112(r6) 95 stdu r1,THREAD_SIZE-112(r5)
96 mr r1,r6 96 mr r1,r5
97 bl .handle_IRQ_event 97 bl .__do_IRQ
98 ld r1,0(r1) 98 ld r1,0(r1)
99 ld r0,16(r1) 99 ld r0,16(r1)
100 mtlr r0 100 mtlr r0
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
new file mode 100644
index 000000000000..3cf2517c5f91
--- /dev/null
+++ b/arch/powerpc/kernel/paca.c
@@ -0,0 +1,142 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/config.h>
11#include <linux/types.h>
12#include <linux/threads.h>
13#include <linux/module.h>
14
15#include <asm/processor.h>
16#include <asm/ptrace.h>
17#include <asm/page.h>
18#include <asm/systemcfg.h>
19#include <asm/lppaca.h>
20#include <asm/iseries/it_lp_queue.h>
21#include <asm/paca.h>
22
23static union {
24 struct systemcfg data;
25 u8 page[PAGE_SIZE];
26} systemcfg_store __attribute__((__section__(".data.page.aligned")));
27struct systemcfg *_systemcfg = &systemcfg_store.data;
28
29
30/* This symbol is provided by the linker - let it fill in the paca
31 * field correctly */
32extern unsigned long __toc_start;
33
34/* The Paca is an array with one entry per processor. Each contains an
35 * lppaca, which contains the information shared between the
36 * hypervisor and Linux. Each also contains an ItLpRegSave area which
37 * is used by the hypervisor to save registers.
38 * On systems with hardware multi-threading, there are two threads
39 * per processor. The Paca array must contain an entry for each thread.
40 * The VPD Areas will give a max logical processors = 2 * max physical
41 * processors. The processor VPD array needs one entry per physical
42 * processor (not thread).
43 */
44#define PACA_INIT_COMMON(number, start, asrr, asrv) \
45 .lock_token = 0x8000, \
46 .paca_index = (number), /* Paca Index */ \
47 .default_decr = 0x00ff0000, /* Initial Decr */ \
48 .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \
49 .stab_real = (asrr), /* Real pointer to segment table */ \
50 .stab_addr = (asrv), /* Virt pointer to segment table */ \
51 .cpu_start = (start), /* Processor start */ \
52 .hw_cpu_id = 0xffff, \
53 .lppaca = { \
54 .desc = 0xd397d781, /* "LpPa" */ \
55 .size = sizeof(struct lppaca), \
56 .dyn_proc_status = 2, \
57 .decr_val = 0x00ff0000, \
58 .fpregs_in_use = 1, \
59 .end_of_quantum = 0xfffffffffffffffful, \
60 .slb_count = 64, \
61 .vmxregs_in_use = 0, \
62 }, \
63
64#ifdef CONFIG_PPC_ISERIES
65#define PACA_INIT_ISERIES(number) \
66 .lppaca_ptr = &paca[number].lppaca, \
67 .reg_save_ptr = &paca[number].reg_save, \
68 .reg_save = { \
69 .xDesc = 0xd397d9e2, /* "LpRS" */ \
70 .xSize = sizeof(struct ItLpRegSave) \
71 }
72
73#define PACA_INIT(number) \
74{ \
75 PACA_INIT_COMMON(number, 0, 0, 0) \
76 PACA_INIT_ISERIES(number) \
77}
78
79#define BOOTCPU_PACA_INIT(number) \
80{ \
81 PACA_INIT_COMMON(number, 1, 0, (u64)&initial_stab) \
82 PACA_INIT_ISERIES(number) \
83}
84
85#else
86#define PACA_INIT(number) \
87{ \
88 PACA_INIT_COMMON(number, 0, 0, 0) \
89}
90
91#define BOOTCPU_PACA_INIT(number) \
92{ \
93 PACA_INIT_COMMON(number, 1, STAB0_PHYS_ADDR, (u64)&initial_stab) \
94}
95#endif
96
97struct paca_struct paca[] = {
98 BOOTCPU_PACA_INIT(0),
99#if NR_CPUS > 1
100 PACA_INIT( 1), PACA_INIT( 2), PACA_INIT( 3),
101#if NR_CPUS > 4
102 PACA_INIT( 4), PACA_INIT( 5), PACA_INIT( 6), PACA_INIT( 7),
103#if NR_CPUS > 8
104 PACA_INIT( 8), PACA_INIT( 9), PACA_INIT( 10), PACA_INIT( 11),
105 PACA_INIT( 12), PACA_INIT( 13), PACA_INIT( 14), PACA_INIT( 15),
106 PACA_INIT( 16), PACA_INIT( 17), PACA_INIT( 18), PACA_INIT( 19),
107 PACA_INIT( 20), PACA_INIT( 21), PACA_INIT( 22), PACA_INIT( 23),
108 PACA_INIT( 24), PACA_INIT( 25), PACA_INIT( 26), PACA_INIT( 27),
109 PACA_INIT( 28), PACA_INIT( 29), PACA_INIT( 30), PACA_INIT( 31),
110#if NR_CPUS > 32
111 PACA_INIT( 32), PACA_INIT( 33), PACA_INIT( 34), PACA_INIT( 35),
112 PACA_INIT( 36), PACA_INIT( 37), PACA_INIT( 38), PACA_INIT( 39),
113 PACA_INIT( 40), PACA_INIT( 41), PACA_INIT( 42), PACA_INIT( 43),
114 PACA_INIT( 44), PACA_INIT( 45), PACA_INIT( 46), PACA_INIT( 47),
115 PACA_INIT( 48), PACA_INIT( 49), PACA_INIT( 50), PACA_INIT( 51),
116 PACA_INIT( 52), PACA_INIT( 53), PACA_INIT( 54), PACA_INIT( 55),
117 PACA_INIT( 56), PACA_INIT( 57), PACA_INIT( 58), PACA_INIT( 59),
118 PACA_INIT( 60), PACA_INIT( 61), PACA_INIT( 62), PACA_INIT( 63),
119#if NR_CPUS > 64
120 PACA_INIT( 64), PACA_INIT( 65), PACA_INIT( 66), PACA_INIT( 67),
121 PACA_INIT( 68), PACA_INIT( 69), PACA_INIT( 70), PACA_INIT( 71),
122 PACA_INIT( 72), PACA_INIT( 73), PACA_INIT( 74), PACA_INIT( 75),
123 PACA_INIT( 76), PACA_INIT( 77), PACA_INIT( 78), PACA_INIT( 79),
124 PACA_INIT( 80), PACA_INIT( 81), PACA_INIT( 82), PACA_INIT( 83),
125 PACA_INIT( 84), PACA_INIT( 85), PACA_INIT( 86), PACA_INIT( 87),
126 PACA_INIT( 88), PACA_INIT( 89), PACA_INIT( 90), PACA_INIT( 91),
127 PACA_INIT( 92), PACA_INIT( 93), PACA_INIT( 94), PACA_INIT( 95),
128 PACA_INIT( 96), PACA_INIT( 97), PACA_INIT( 98), PACA_INIT( 99),
129 PACA_INIT(100), PACA_INIT(101), PACA_INIT(102), PACA_INIT(103),
130 PACA_INIT(104), PACA_INIT(105), PACA_INIT(106), PACA_INIT(107),
131 PACA_INIT(108), PACA_INIT(109), PACA_INIT(110), PACA_INIT(111),
132 PACA_INIT(112), PACA_INIT(113), PACA_INIT(114), PACA_INIT(115),
133 PACA_INIT(116), PACA_INIT(117), PACA_INIT(118), PACA_INIT(119),
134 PACA_INIT(120), PACA_INIT(121), PACA_INIT(122), PACA_INIT(123),
135 PACA_INIT(124), PACA_INIT(125), PACA_INIT(126), PACA_INIT(127),
136#endif
137#endif
138#endif
139#endif
140#endif
141};
142EXPORT_SYMBOL(paca);
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 47d6f7e2ea9f..5dcf4ba05ee8 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -44,6 +44,7 @@
44#include <asm/cputable.h> 44#include <asm/cputable.h>
45#include <asm/btext.h> 45#include <asm/btext.h>
46#include <asm/div64.h> 46#include <asm/div64.h>
47#include <asm/signal.h>
47 48
48#ifdef CONFIG_8xx 49#ifdef CONFIG_8xx
49#include <asm/commproc.h> 50#include <asm/commproc.h>
@@ -56,7 +57,6 @@ extern void machine_check_exception(struct pt_regs *regs);
56extern void alignment_exception(struct pt_regs *regs); 57extern void alignment_exception(struct pt_regs *regs);
57extern void program_check_exception(struct pt_regs *regs); 58extern void program_check_exception(struct pt_regs *regs);
58extern void single_step_exception(struct pt_regs *regs); 59extern void single_step_exception(struct pt_regs *regs);
59extern int do_signal(sigset_t *, struct pt_regs *);
60extern int pmac_newworld; 60extern int pmac_newworld;
61extern int sys_sigreturn(struct pt_regs *regs); 61extern int sys_sigreturn(struct pt_regs *regs);
62 62
@@ -188,9 +188,6 @@ EXPORT_SYMBOL(adb_try_handler_change);
188EXPORT_SYMBOL(cuda_request); 188EXPORT_SYMBOL(cuda_request);
189EXPORT_SYMBOL(cuda_poll); 189EXPORT_SYMBOL(cuda_poll);
190#endif /* CONFIG_ADB_CUDA */ 190#endif /* CONFIG_ADB_CUDA */
191#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_PPC32)
192EXPORT_SYMBOL(_machine);
193#endif
194#ifdef CONFIG_PPC_PMAC 191#ifdef CONFIG_PPC_PMAC
195EXPORT_SYMBOL(sys_ctrler); 192EXPORT_SYMBOL(sys_ctrler);
196#endif 193#endif
diff --git a/arch/powerpc/kernel/proc_ppc64.c b/arch/powerpc/kernel/proc_ppc64.c
new file mode 100644
index 000000000000..a1c19502fe8b
--- /dev/null
+++ b/arch/powerpc/kernel/proc_ppc64.c
@@ -0,0 +1,126 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/config.h>
20#include <linux/init.h>
21#include <linux/mm.h>
22#include <linux/proc_fs.h>
23#include <linux/slab.h>
24#include <linux/kernel.h>
25
26#include <asm/systemcfg.h>
27#include <asm/rtas.h>
28#include <asm/uaccess.h>
29#include <asm/prom.h>
30
31static loff_t page_map_seek( struct file *file, loff_t off, int whence);
32static ssize_t page_map_read( struct file *file, char __user *buf, size_t nbytes,
33 loff_t *ppos);
34static int page_map_mmap( struct file *file, struct vm_area_struct *vma );
35
36static struct file_operations page_map_fops = {
37 .llseek = page_map_seek,
38 .read = page_map_read,
39 .mmap = page_map_mmap
40};
41
42/*
43 * Create the ppc64 and ppc64/rtas directories early. This allows us to
44 * assume that they have been previously created in drivers.
45 */
46static int __init proc_ppc64_create(void)
47{
48 struct proc_dir_entry *root;
49
50 root = proc_mkdir("ppc64", NULL);
51 if (!root)
52 return 1;
53
54 if (!(platform_is_pseries() || _machine == PLATFORM_CELL))
55 return 0;
56
57 if (!proc_mkdir("rtas", root))
58 return 1;
59
60 if (!proc_symlink("rtas", NULL, "ppc64/rtas"))
61 return 1;
62
63 return 0;
64}
65core_initcall(proc_ppc64_create);
66
67static int __init proc_ppc64_init(void)
68{
69 struct proc_dir_entry *pde;
70
71 pde = create_proc_entry("ppc64/systemcfg", S_IFREG|S_IRUGO, NULL);
72 if (!pde)
73 return 1;
74 pde->nlink = 1;
75 pde->data = _systemcfg;
76 pde->size = PAGE_SIZE;
77 pde->proc_fops = &page_map_fops;
78
79 return 0;
80}
81__initcall(proc_ppc64_init);
82
83static loff_t page_map_seek( struct file *file, loff_t off, int whence)
84{
85 loff_t new;
86 struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
87
88 switch(whence) {
89 case 0:
90 new = off;
91 break;
92 case 1:
93 new = file->f_pos + off;
94 break;
95 case 2:
96 new = dp->size + off;
97 break;
98 default:
99 return -EINVAL;
100 }
101 if ( new < 0 || new > dp->size )
102 return -EINVAL;
103 return (file->f_pos = new);
104}
105
106static ssize_t page_map_read( struct file *file, char __user *buf, size_t nbytes,
107 loff_t *ppos)
108{
109 struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
110 return simple_read_from_buffer(buf, nbytes, ppos, dp->data, dp->size);
111}
112
113static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
114{
115 struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
116
117 vma->vm_flags |= VM_SHM | VM_LOCKED;
118
119 if ((vma->vm_end - vma->vm_start) > dp->size)
120 return -EINVAL;
121
122 remap_pfn_range(vma, vma->vm_start, __pa(dp->data) >> PAGE_SHIFT,
123 dp->size, vma->vm_page_prot);
124 return 0;
125}
126
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index f645adb57534..6a5b468edb4d 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -48,9 +48,6 @@
48#include <asm/machdep.h> 48#include <asm/machdep.h>
49#include <asm/pSeries_reconfig.h> 49#include <asm/pSeries_reconfig.h>
50#include <asm/pci-bridge.h> 50#include <asm/pci-bridge.h>
51#ifdef CONFIG_PPC64
52#include <asm/systemcfg.h>
53#endif
54 51
55#ifdef DEBUG 52#ifdef DEBUG
56#define DBG(fmt...) printk(KERN_ERR fmt) 53#define DBG(fmt...) printk(KERN_ERR fmt)
@@ -74,10 +71,6 @@ struct isa_reg_property {
74typedef int interpret_func(struct device_node *, unsigned long *, 71typedef int interpret_func(struct device_node *, unsigned long *,
75 int, int, int); 72 int, int, int);
76 73
77extern struct rtas_t rtas;
78extern struct lmb lmb;
79extern unsigned long klimit;
80
81static int __initdata dt_root_addr_cells; 74static int __initdata dt_root_addr_cells;
82static int __initdata dt_root_size_cells; 75static int __initdata dt_root_size_cells;
83 76
@@ -391,7 +384,7 @@ static int __devinit finish_node_interrupts(struct device_node *np,
391 384
392#ifdef CONFIG_PPC64 385#ifdef CONFIG_PPC64
393 /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */ 386 /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
394 if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) { 387 if (_machine == PLATFORM_POWERMAC && ic && ic->parent) {
395 char *name = get_property(ic->parent, "name", NULL); 388 char *name = get_property(ic->parent, "name", NULL);
396 if (name && !strcmp(name, "u3")) 389 if (name && !strcmp(name, "u3"))
397 np->intrs[intrcount].line += 128; 390 np->intrs[intrcount].line += 128;
@@ -1087,9 +1080,9 @@ void __init unflatten_device_tree(void)
1087static int __init early_init_dt_scan_cpus(unsigned long node, 1080static int __init early_init_dt_scan_cpus(unsigned long node,
1088 const char *uname, int depth, void *data) 1081 const char *uname, int depth, void *data)
1089{ 1082{
1090 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
1091 u32 *prop; 1083 u32 *prop;
1092 unsigned long size = 0; 1084 unsigned long size;
1085 char *type = of_get_flat_dt_prop(node, "device_type", &size);
1093 1086
1094 /* We are scanning "cpu" nodes only */ 1087 /* We are scanning "cpu" nodes only */
1095 if (type == NULL || strcmp(type, "cpu") != 0) 1088 if (type == NULL || strcmp(type, "cpu") != 0)
@@ -1115,7 +1108,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
1115 1108
1116#ifdef CONFIG_ALTIVEC 1109#ifdef CONFIG_ALTIVEC
1117 /* Check if we have a VMX and eventually update CPU features */ 1110 /* Check if we have a VMX and eventually update CPU features */
1118 prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", &size); 1111 prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
1119 if (prop && (*prop) > 0) { 1112 if (prop && (*prop) > 0) {
1120 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; 1113 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1121 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; 1114 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
@@ -1161,13 +1154,9 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
1161 prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL); 1154 prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
1162 if (prop == NULL) 1155 if (prop == NULL)
1163 return 0; 1156 return 0;
1164#ifdef CONFIG_PPC64
1165 systemcfg->platform = *prop;
1166#else
1167#ifdef CONFIG_PPC_MULTIPLATFORM 1157#ifdef CONFIG_PPC_MULTIPLATFORM
1168 _machine = *prop; 1158 _machine = *prop;
1169#endif 1159#endif
1170#endif
1171 1160
1172#ifdef CONFIG_PPC64 1161#ifdef CONFIG_PPC64
1173 /* check if iommu is forced on or off */ 1162 /* check if iommu is forced on or off */
@@ -1264,7 +1253,14 @@ static int __init early_init_dt_scan_memory(unsigned long node,
1264 unsigned long l; 1253 unsigned long l;
1265 1254
1266 /* We are scanning "memory" nodes only */ 1255 /* We are scanning "memory" nodes only */
1267 if (type == NULL || strcmp(type, "memory") != 0) 1256 if (type == NULL) {
1257 /*
1258 * The longtrail doesn't have a device_type on the
1259 * /memory node, so look for the node called /memory@0.
1260 */
1261 if (depth != 1 || strcmp(uname, "memory@0") != 0)
1262 return 0;
1263 } else if (strcmp(type, "memory") != 0)
1268 return 0; 1264 return 0;
1269 1265
1270 reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l); 1266 reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
@@ -1339,9 +1335,6 @@ void __init early_init_devtree(void *params)
1339 of_scan_flat_dt(early_init_dt_scan_memory, NULL); 1335 of_scan_flat_dt(early_init_dt_scan_memory, NULL);
1340 lmb_enforce_memory_limit(memory_limit); 1336 lmb_enforce_memory_limit(memory_limit);
1341 lmb_analyze(); 1337 lmb_analyze();
1342#ifdef CONFIG_PPC64
1343 systemcfg->physicalMemorySize = lmb_phys_mem_size();
1344#endif
1345 lmb_reserve(0, __pa(klimit)); 1338 lmb_reserve(0, __pa(klimit));
1346 1339
1347 DBG("Phys. mem: %lx\n", lmb_phys_mem_size()); 1340 DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
@@ -1908,7 +1901,7 @@ static int of_finish_dynamic_node(struct device_node *node,
1908 /* We don't support that function on PowerMac, at least 1901 /* We don't support that function on PowerMac, at least
1909 * not yet 1902 * not yet
1910 */ 1903 */
1911 if (systemcfg->platform == PLATFORM_POWERMAC) 1904 if (_machine == PLATFORM_POWERMAC)
1912 return -ENODEV; 1905 return -ENODEV;
1913 1906
1914 /* fix up new node's linux_phandle field */ 1907 /* fix up new node's linux_phandle field */
@@ -1992,9 +1985,11 @@ int prom_add_property(struct device_node* np, struct property* prop)
1992 *next = prop; 1985 *next = prop;
1993 write_unlock(&devtree_lock); 1986 write_unlock(&devtree_lock);
1994 1987
1988#ifdef CONFIG_PROC_DEVICETREE
1995 /* try to add to proc as well if it was initialized */ 1989 /* try to add to proc as well if it was initialized */
1996 if (np->pde) 1990 if (np->pde)
1997 proc_device_tree_add_prop(np->pde, prop); 1991 proc_device_tree_add_prop(np->pde, prop);
1992#endif /* CONFIG_PROC_DEVICETREE */
1998 1993
1999 return 0; 1994 return 0;
2000} 1995}
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 6dc33d19fc2a..4ce0105c308e 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -94,11 +94,17 @@ extern const struct linux_logo logo_linux_clut224;
94#ifdef CONFIG_PPC64 94#ifdef CONFIG_PPC64
95#define RELOC(x) (*PTRRELOC(&(x))) 95#define RELOC(x) (*PTRRELOC(&(x)))
96#define ADDR(x) (u32) add_reloc_offset((unsigned long)(x)) 96#define ADDR(x) (u32) add_reloc_offset((unsigned long)(x))
97#define OF_WORKAROUNDS 0
97#else 98#else
98#define RELOC(x) (x) 99#define RELOC(x) (x)
99#define ADDR(x) (u32) (x) 100#define ADDR(x) (u32) (x)
101#define OF_WORKAROUNDS of_workarounds
102int of_workarounds;
100#endif 103#endif
101 104
105#define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
106#define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
107
102#define PROM_BUG() do { \ 108#define PROM_BUG() do { \
103 prom_printf("kernel BUG at %s line 0x%x!\n", \ 109 prom_printf("kernel BUG at %s line 0x%x!\n", \
104 RELOC(__FILE__), __LINE__); \ 110 RELOC(__FILE__), __LINE__); \
@@ -111,11 +117,6 @@ extern const struct linux_logo logo_linux_clut224;
111#define prom_debug(x...) 117#define prom_debug(x...)
112#endif 118#endif
113 119
114#ifdef CONFIG_PPC32
115#define PLATFORM_POWERMAC _MACH_Pmac
116#define PLATFORM_CHRP _MACH_chrp
117#endif
118
119 120
120typedef u32 prom_arg_t; 121typedef u32 prom_arg_t;
121 122
@@ -128,10 +129,11 @@ struct prom_args {
128 129
129struct prom_t { 130struct prom_t {
130 ihandle root; 131 ihandle root;
131 ihandle chosen; 132 phandle chosen;
132 int cpu; 133 int cpu;
133 ihandle stdout; 134 ihandle stdout;
134 ihandle mmumap; 135 ihandle mmumap;
136 ihandle memory;
135}; 137};
136 138
137struct mem_map_entry { 139struct mem_map_entry {
@@ -360,16 +362,36 @@ static void __init prom_printf(const char *format, ...)
360static unsigned int __init prom_claim(unsigned long virt, unsigned long size, 362static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
361 unsigned long align) 363 unsigned long align)
362{ 364{
363 int ret;
364 struct prom_t *_prom = &RELOC(prom); 365 struct prom_t *_prom = &RELOC(prom);
365 366
366 ret = call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size, 367 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
367 (prom_arg_t)align); 368 /*
368 if (ret != -1 && _prom->mmumap != 0) 369 * Old OF requires we claim physical and virtual separately
369 /* old pmacs need us to map as well */ 370 * and then map explicitly (assuming virtual mode)
371 */
372 int ret;
373 prom_arg_t result;
374
375 ret = call_prom_ret("call-method", 5, 2, &result,
376 ADDR("claim"), _prom->memory,
377 align, size, virt);
378 if (ret != 0 || result == -1)
379 return -1;
380 ret = call_prom_ret("call-method", 5, 2, &result,
381 ADDR("claim"), _prom->mmumap,
382 align, size, virt);
383 if (ret != 0) {
384 call_prom("call-method", 4, 1, ADDR("release"),
385 _prom->memory, size, virt);
386 return -1;
387 }
388 /* the 0x12 is M (coherence) + PP == read/write */
370 call_prom("call-method", 6, 1, 389 call_prom("call-method", 6, 1,
371 ADDR("map"), _prom->mmumap, 0, size, virt, virt); 390 ADDR("map"), _prom->mmumap, 0x12, size, virt, virt);
372 return ret; 391 return virt;
392 }
393 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
394 (prom_arg_t)align);
373} 395}
374 396
375static void __init __attribute__((noreturn)) prom_panic(const char *reason) 397static void __init __attribute__((noreturn)) prom_panic(const char *reason)
@@ -415,11 +437,52 @@ static int inline prom_getproplen(phandle node, const char *pname)
415 return call_prom("getproplen", 2, 1, node, ADDR(pname)); 437 return call_prom("getproplen", 2, 1, node, ADDR(pname));
416} 438}
417 439
418static int inline prom_setprop(phandle node, const char *pname, 440static void add_string(char **str, const char *q)
419 void *value, size_t valuelen)
420{ 441{
421 return call_prom("setprop", 4, 1, node, ADDR(pname), 442 char *p = *str;
422 (u32)(unsigned long) value, (u32) valuelen); 443
444 while (*q)
445 *p++ = *q++;
446 *p++ = ' ';
447 *str = p;
448}
449
450static char *tohex(unsigned int x)
451{
452 static char digits[] = "0123456789abcdef";
453 static char result[9];
454 int i;
455
456 result[8] = 0;
457 i = 8;
458 do {
459 --i;
460 result[i] = digits[x & 0xf];
461 x >>= 4;
462 } while (x != 0 && i > 0);
463 return &result[i];
464}
465
466static int __init prom_setprop(phandle node, const char *nodename,
467 const char *pname, void *value, size_t valuelen)
468{
469 char cmd[256], *p;
470
471 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
472 return call_prom("setprop", 4, 1, node, ADDR(pname),
473 (u32)(unsigned long) value, (u32) valuelen);
474
475 /* gah... setprop doesn't work on longtrail, have to use interpret */
476 p = cmd;
477 add_string(&p, "dev");
478 add_string(&p, nodename);
479 add_string(&p, tohex((u32)(unsigned long) value));
480 add_string(&p, tohex(valuelen));
481 add_string(&p, tohex(ADDR(pname)));
482 add_string(&p, tohex(strlen(RELOC(pname))));
483 add_string(&p, "property");
484 *p = 0;
485 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
423} 486}
424 487
425/* We can't use the standard versions because of RELOC headaches. */ 488/* We can't use the standard versions because of RELOC headaches. */
@@ -980,7 +1043,7 @@ static void __init prom_instantiate_rtas(void)
980 1043
981 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas")); 1044 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
982 if (!IHANDLE_VALID(rtas_inst)) { 1045 if (!IHANDLE_VALID(rtas_inst)) {
983 prom_printf("opening rtas package failed"); 1046 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
984 return; 1047 return;
985 } 1048 }
986 1049
@@ -988,7 +1051,7 @@ static void __init prom_instantiate_rtas(void)
988 1051
989 if (call_prom_ret("call-method", 3, 2, &entry, 1052 if (call_prom_ret("call-method", 3, 2, &entry,
990 ADDR("instantiate-rtas"), 1053 ADDR("instantiate-rtas"),
991 rtas_inst, base) == PROM_ERROR 1054 rtas_inst, base) != 0
992 || entry == 0) { 1055 || entry == 0) {
993 prom_printf(" failed\n"); 1056 prom_printf(" failed\n");
994 return; 1057 return;
@@ -997,8 +1060,10 @@ static void __init prom_instantiate_rtas(void)
997 1060
998 reserve_mem(base, size); 1061 reserve_mem(base, size);
999 1062
1000 prom_setprop(rtas_node, "linux,rtas-base", &base, sizeof(base)); 1063 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1001 prom_setprop(rtas_node, "linux,rtas-entry", &entry, sizeof(entry)); 1064 &base, sizeof(base));
1065 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1066 &entry, sizeof(entry));
1002 1067
1003 prom_debug("rtas base = 0x%x\n", base); 1068 prom_debug("rtas base = 0x%x\n", base);
1004 prom_debug("rtas entry = 0x%x\n", entry); 1069 prom_debug("rtas entry = 0x%x\n", entry);
@@ -1089,10 +1154,6 @@ static void __init prom_initialize_tce_table(void)
1089 if (base < local_alloc_bottom) 1154 if (base < local_alloc_bottom)
1090 local_alloc_bottom = base; 1155 local_alloc_bottom = base;
1091 1156
1092 /* Save away the TCE table attributes for later use. */
1093 prom_setprop(node, "linux,tce-base", &base, sizeof(base));
1094 prom_setprop(node, "linux,tce-size", &minsize, sizeof(minsize));
1095
1096 /* It seems OF doesn't null-terminate the path :-( */ 1157 /* It seems OF doesn't null-terminate the path :-( */
1097 memset(path, 0, sizeof(path)); 1158 memset(path, 0, sizeof(path));
1098 /* Call OF to setup the TCE hardware */ 1159 /* Call OF to setup the TCE hardware */
@@ -1101,6 +1162,10 @@ static void __init prom_initialize_tce_table(void)
1101 prom_printf("package-to-path failed\n"); 1162 prom_printf("package-to-path failed\n");
1102 } 1163 }
1103 1164
1165 /* Save away the TCE table attributes for later use. */
1166 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
1167 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
1168
1104 prom_debug("TCE table: %s\n", path); 1169 prom_debug("TCE table: %s\n", path);
1105 prom_debug("\tnode = 0x%x\n", node); 1170 prom_debug("\tnode = 0x%x\n", node);
1106 prom_debug("\tbase = 0x%x\n", base); 1171 prom_debug("\tbase = 0x%x\n", base);
@@ -1342,6 +1407,7 @@ static void __init prom_init_client_services(unsigned long pp)
1342/* 1407/*
1343 * For really old powermacs, we need to map things we claim. 1408 * For really old powermacs, we need to map things we claim.
1344 * For that, we need the ihandle of the mmu. 1409 * For that, we need the ihandle of the mmu.
1410 * Also, on the longtrail, we need to work around other bugs.
1345 */ 1411 */
1346static void __init prom_find_mmu(void) 1412static void __init prom_find_mmu(void)
1347{ 1413{
@@ -1355,12 +1421,19 @@ static void __init prom_find_mmu(void)
1355 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0) 1421 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
1356 return; 1422 return;
1357 version[sizeof(version) - 1] = 0; 1423 version[sizeof(version) - 1] = 0;
1358 prom_printf("OF version is '%s'\n", version);
1359 /* XXX might need to add other versions here */ 1424 /* XXX might need to add other versions here */
1360 if (strcmp(version, "Open Firmware, 1.0.5") != 0) 1425 if (strcmp(version, "Open Firmware, 1.0.5") == 0)
1426 of_workarounds = OF_WA_CLAIM;
1427 else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
1428 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
1429 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
1430 } else
1361 return; 1431 return;
1432 _prom->memory = call_prom("open", 1, 1, ADDR("/memory"));
1362 prom_getprop(_prom->chosen, "mmu", &_prom->mmumap, 1433 prom_getprop(_prom->chosen, "mmu", &_prom->mmumap,
1363 sizeof(_prom->mmumap)); 1434 sizeof(_prom->mmumap));
1435 if (!IHANDLE_VALID(_prom->memory) || !IHANDLE_VALID(_prom->mmumap))
1436 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
1364} 1437}
1365#else 1438#else
1366#define prom_find_mmu() 1439#define prom_find_mmu()
@@ -1382,16 +1455,17 @@ static void __init prom_init_stdout(void)
1382 memset(path, 0, 256); 1455 memset(path, 0, 256);
1383 call_prom("instance-to-path", 3, 1, _prom->stdout, path, 255); 1456 call_prom("instance-to-path", 3, 1, _prom->stdout, path, 255);
1384 val = call_prom("instance-to-package", 1, 1, _prom->stdout); 1457 val = call_prom("instance-to-package", 1, 1, _prom->stdout);
1385 prom_setprop(_prom->chosen, "linux,stdout-package", &val, sizeof(val)); 1458 prom_setprop(_prom->chosen, "/chosen", "linux,stdout-package",
1459 &val, sizeof(val));
1386 prom_printf("OF stdout device is: %s\n", RELOC(of_stdout_device)); 1460 prom_printf("OF stdout device is: %s\n", RELOC(of_stdout_device));
1387 prom_setprop(_prom->chosen, "linux,stdout-path", 1461 prom_setprop(_prom->chosen, "/chosen", "linux,stdout-path",
1388 RELOC(of_stdout_device), strlen(RELOC(of_stdout_device))+1); 1462 path, strlen(path) + 1);
1389 1463
1390 /* If it's a display, note it */ 1464 /* If it's a display, note it */
1391 memset(type, 0, sizeof(type)); 1465 memset(type, 0, sizeof(type));
1392 prom_getprop(val, "device_type", type, sizeof(type)); 1466 prom_getprop(val, "device_type", type, sizeof(type));
1393 if (strcmp(type, RELOC("display")) == 0) 1467 if (strcmp(type, RELOC("display")) == 0)
1394 prom_setprop(val, "linux,boot-display", NULL, 0); 1468 prom_setprop(val, path, "linux,boot-display", NULL, 0);
1395} 1469}
1396 1470
1397static void __init prom_close_stdin(void) 1471static void __init prom_close_stdin(void)
@@ -1514,7 +1588,7 @@ static void __init prom_check_displays(void)
1514 1588
1515 /* Success */ 1589 /* Success */
1516 prom_printf("done\n"); 1590 prom_printf("done\n");
1517 prom_setprop(node, "linux,opened", NULL, 0); 1591 prom_setprop(node, path, "linux,opened", NULL, 0);
1518 1592
1519 /* Setup a usable color table when the appropriate 1593 /* Setup a usable color table when the appropriate
1520 * method is available. Should update this to set-colors */ 1594 * method is available. Should update this to set-colors */
@@ -1884,9 +1958,11 @@ static void __init fixup_device_tree(void)
1884 /* interrupt on this revision of u3 is number 0 and level */ 1958 /* interrupt on this revision of u3 is number 0 and level */
1885 interrupts[0] = 0; 1959 interrupts[0] = 0;
1886 interrupts[1] = 1; 1960 interrupts[1] = 1;
1887 prom_setprop(i2c, "interrupts", &interrupts, sizeof(interrupts)); 1961 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
1962 &interrupts, sizeof(interrupts));
1888 parent = (u32)mpic; 1963 parent = (u32)mpic;
1889 prom_setprop(i2c, "interrupt-parent", &parent, sizeof(parent)); 1964 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
1965 &parent, sizeof(parent));
1890#endif 1966#endif
1891} 1967}
1892 1968
@@ -1922,11 +1998,11 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
1922 RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4; 1998 RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
1923 1999
1924 val = RELOC(prom_initrd_start); 2000 val = RELOC(prom_initrd_start);
1925 prom_setprop(_prom->chosen, "linux,initrd-start", &val, 2001 prom_setprop(_prom->chosen, "/chosen", "linux,initrd-start",
1926 sizeof(val)); 2002 &val, sizeof(val));
1927 val = RELOC(prom_initrd_end); 2003 val = RELOC(prom_initrd_end);
1928 prom_setprop(_prom->chosen, "linux,initrd-end", &val, 2004 prom_setprop(_prom->chosen, "/chosen", "linux,initrd-end",
1929 sizeof(val)); 2005 &val, sizeof(val));
1930 2006
1931 reserve_mem(RELOC(prom_initrd_start), 2007 reserve_mem(RELOC(prom_initrd_start),
1932 RELOC(prom_initrd_end) - RELOC(prom_initrd_start)); 2008 RELOC(prom_initrd_end) - RELOC(prom_initrd_start));
@@ -1969,14 +2045,15 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
1969 prom_init_client_services(pp); 2045 prom_init_client_services(pp);
1970 2046
1971 /* 2047 /*
1972 * Init prom stdout device 2048 * See if this OF is old enough that we need to do explicit maps
2049 * and other workarounds
1973 */ 2050 */
1974 prom_init_stdout(); 2051 prom_find_mmu();
1975 2052
1976 /* 2053 /*
1977 * See if this OF is old enough that we need to do explicit maps 2054 * Init prom stdout device
1978 */ 2055 */
1979 prom_find_mmu(); 2056 prom_init_stdout();
1980 2057
1981 /* 2058 /*
1982 * Check for an initrd 2059 * Check for an initrd
@@ -1989,14 +2066,15 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
1989 */ 2066 */
1990 RELOC(of_platform) = prom_find_machine_type(); 2067 RELOC(of_platform) = prom_find_machine_type();
1991 getprop_rval = RELOC(of_platform); 2068 getprop_rval = RELOC(of_platform);
1992 prom_setprop(_prom->chosen, "linux,platform", 2069 prom_setprop(_prom->chosen, "/chosen", "linux,platform",
1993 &getprop_rval, sizeof(getprop_rval)); 2070 &getprop_rval, sizeof(getprop_rval));
1994 2071
1995#ifdef CONFIG_PPC_PSERIES 2072#ifdef CONFIG_PPC_PSERIES
1996 /* 2073 /*
1997 * On pSeries, inform the firmware about our capabilities 2074 * On pSeries, inform the firmware about our capabilities
1998 */ 2075 */
1999 if (RELOC(of_platform) & PLATFORM_PSERIES) 2076 if (RELOC(of_platform) == PLATFORM_PSERIES ||
2077 RELOC(of_platform) == PLATFORM_PSERIES_LPAR)
2000 prom_send_capabilities(); 2078 prom_send_capabilities();
2001#endif 2079#endif
2002 2080
@@ -2050,21 +2128,23 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2050 * Fill in some infos for use by the kernel later on 2128 * Fill in some infos for use by the kernel later on
2051 */ 2129 */
2052 if (RELOC(prom_memory_limit)) 2130 if (RELOC(prom_memory_limit))
2053 prom_setprop(_prom->chosen, "linux,memory-limit", 2131 prom_setprop(_prom->chosen, "/chosen", "linux,memory-limit",
2054 &RELOC(prom_memory_limit), 2132 &RELOC(prom_memory_limit),
2055 sizeof(prom_memory_limit)); 2133 sizeof(prom_memory_limit));
2056#ifdef CONFIG_PPC64 2134#ifdef CONFIG_PPC64
2057 if (RELOC(ppc64_iommu_off)) 2135 if (RELOC(ppc64_iommu_off))
2058 prom_setprop(_prom->chosen, "linux,iommu-off", NULL, 0); 2136 prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off",
2137 NULL, 0);
2059 2138
2060 if (RELOC(iommu_force_on)) 2139 if (RELOC(iommu_force_on))
2061 prom_setprop(_prom->chosen, "linux,iommu-force-on", NULL, 0); 2140 prom_setprop(_prom->chosen, "/chosen", "linux,iommu-force-on",
2141 NULL, 0);
2062 2142
2063 if (RELOC(prom_tce_alloc_start)) { 2143 if (RELOC(prom_tce_alloc_start)) {
2064 prom_setprop(_prom->chosen, "linux,tce-alloc-start", 2144 prom_setprop(_prom->chosen, "/chosen", "linux,tce-alloc-start",
2065 &RELOC(prom_tce_alloc_start), 2145 &RELOC(prom_tce_alloc_start),
2066 sizeof(prom_tce_alloc_start)); 2146 sizeof(prom_tce_alloc_start));
2067 prom_setprop(_prom->chosen, "linux,tce-alloc-end", 2147 prom_setprop(_prom->chosen, "/chosen", "linux,tce-alloc-end",
2068 &RELOC(prom_tce_alloc_end), 2148 &RELOC(prom_tce_alloc_end),
2069 sizeof(prom_tce_alloc_end)); 2149 sizeof(prom_tce_alloc_end));
2070 } 2150 }
@@ -2081,8 +2161,13 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2081 prom_printf("copying OF device tree ...\n"); 2161 prom_printf("copying OF device tree ...\n");
2082 flatten_device_tree(); 2162 flatten_device_tree();
2083 2163
2084 /* in case stdin is USB and still active on IBM machines... */ 2164 /*
2085 prom_close_stdin(); 2165 * in case stdin is USB and still active on IBM machines...
2166 * Unfortunately quiesce crashes on some powermacs if we have
2167 * closed stdin already (in particular the powerbook 101).
2168 */
2169 if (RELOC(of_platform) != PLATFORM_POWERMAC)
2170 prom_close_stdin();
2086 2171
2087 /* 2172 /*
2088 * Call OF "quiesce" method to shut down pending DMA's from 2173 * Call OF "quiesce" method to shut down pending DMA's from
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
index 5bdd5b079d96..ae1a36449ccd 100644
--- a/arch/powerpc/kernel/rtas-proc.c
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -259,7 +259,7 @@ static int __init proc_rtas_init(void)
259{ 259{
260 struct proc_dir_entry *entry; 260 struct proc_dir_entry *entry;
261 261
262 if (!(systemcfg->platform & PLATFORM_PSERIES)) 262 if (_machine != PLATFORM_PSERIES && _machine != PLATFORM_PSERIES_LPAR)
263 return 1; 263 return 1;
264 264
265 rtas_node = of_find_node_by_name(NULL, "rtas"); 265 rtas_node = of_find_node_by_name(NULL, "rtas");
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 9d4e07f6f1ec..4283fa33f784 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -29,9 +29,6 @@
29#include <asm/delay.h> 29#include <asm/delay.h>
30#include <asm/uaccess.h> 30#include <asm/uaccess.h>
31#include <asm/lmb.h> 31#include <asm/lmb.h>
32#ifdef CONFIG_PPC64
33#include <asm/systemcfg.h>
34#endif
35 32
36struct rtas_t rtas = { 33struct rtas_t rtas = {
37 .lock = SPIN_LOCK_UNLOCKED 34 .lock = SPIN_LOCK_UNLOCKED
@@ -671,7 +668,7 @@ void __init rtas_initialize(void)
671 * the stop-self token if any 668 * the stop-self token if any
672 */ 669 */
673#ifdef CONFIG_PPC64 670#ifdef CONFIG_PPC64
674 if (systemcfg->platform == PLATFORM_PSERIES_LPAR) 671 if (_machine == PLATFORM_PSERIES_LPAR)
675 rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX); 672 rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX);
676#endif 673#endif
677 rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region); 674 rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
new file mode 100644
index 000000000000..0e5a8e116653
--- /dev/null
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -0,0 +1,513 @@
1/*
2 * arch/ppc64/kernel/rtas_pci.c
3 *
4 * Copyright (C) 2001 Dave Engebretsen, IBM Corporation
5 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
6 *
7 * RTAS specific routines for PCI.
8 *
9 * Based on code from pci.c, chrp_pci.c and pSeries_pci.c
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/threads.h>
28#include <linux/pci.h>
29#include <linux/string.h>
30#include <linux/init.h>
31#include <linux/bootmem.h>
32
33#include <asm/io.h>
34#include <asm/pgtable.h>
35#include <asm/irq.h>
36#include <asm/prom.h>
37#include <asm/machdep.h>
38#include <asm/pci-bridge.h>
39#include <asm/iommu.h>
40#include <asm/rtas.h>
41#include <asm/mpic.h>
42#include <asm/ppc-pci.h>
43
44/* RTAS tokens */
45static int read_pci_config;
46static int write_pci_config;
47static int ibm_read_pci_config;
48static int ibm_write_pci_config;
49
50static inline int config_access_valid(struct pci_dn *dn, int where)
51{
52 if (where < 256)
53 return 1;
54 if (where < 4096 && dn->pci_ext_config_space)
55 return 1;
56
57 return 0;
58}
59
/*
 * Return 1 if the device node is usable, 0 otherwise.  A device is
 * available when it has no "status" property at all, or when that
 * property reads "okay" -- or the Open Firmware shorthand "ok",
 * which the original code did not accept.
 */
static int of_device_available(struct device_node * dn)
{
	char *status = get_property(dn, "status", NULL);

	/* No "status" property means the device is available */
	if (!status)
		return 1;

	/* OF uses both "okay" and "ok" to mean operational */
	if (!strcmp(status, "okay") || !strcmp(status, "ok"))
		return 1;

	return 0;
}
74
/*
 * Read PCI config space through the RTAS "read-pci-config" call (or
 * the BUID-qualified "ibm,read-pci-config" when the host bridge has a
 * BUID).  Stores the value through @val and returns a PCIBIOS_* code.
 */
static int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
{
	int returnval = -1;
	unsigned long buid, addr;
	int ret;

	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (!config_access_valid(pdn, where))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	/* RTAS config address: extended-register bits, bus, devfn, reg */
	addr = ((where & 0xf00) << 20) | (pdn->busno << 16) |
		(pdn->devfn << 8) | (where & 0xff);
	buid = pdn->phb->buid;
	if (buid) {
		/* Bridges with a BUID need the ibm, variant of the call */
		ret = rtas_call(ibm_read_pci_config, 4, 2, &returnval,
				addr, BUID_HI(buid), BUID_LO(buid), size);
	} else {
		ret = rtas_call(read_pci_config, 2, 2, &returnval, addr, size);
	}
	*val = returnval;

	if (ret)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* An all-ones read may really be a frozen EEH slot rather than
	 * data; let the EEH code make the call */
	if (returnval == EEH_IO_ERROR_VALUE(size) &&
	    eeh_dn_check_failure (pdn->node, NULL))
		return PCIBIOS_DEVICE_NOT_FOUND;

	return PCIBIOS_SUCCESSFUL;
}
106
107static int rtas_pci_read_config(struct pci_bus *bus,
108 unsigned int devfn,
109 int where, int size, u32 *val)
110{
111 struct device_node *busdn, *dn;
112
113 if (bus->self)
114 busdn = pci_device_to_OF_node(bus->self);
115 else
116 busdn = bus->sysdata; /* must be a phb */
117
118 /* Search only direct children of the bus */
119 for (dn = busdn->child; dn; dn = dn->sibling) {
120 struct pci_dn *pdn = PCI_DN(dn);
121 if (pdn && pdn->devfn == devfn
122 && of_device_available(dn))
123 return rtas_read_config(pdn, where, size, val);
124 }
125
126 return PCIBIOS_DEVICE_NOT_FOUND;
127}
128
/*
 * Write @val to PCI config space through the RTAS "write-pci-config"
 * call (or "ibm,write-pci-config" when the host bridge has a BUID).
 * Returns a PCIBIOS_* result code.
 */
int rtas_write_config(struct pci_dn *pdn, int where, int size, u32 val)
{
	unsigned long buid, addr;
	int ret;

	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (!config_access_valid(pdn, where))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	/* RTAS config address: extended-register bits, bus, devfn, reg */
	addr = ((where & 0xf00) << 20) | (pdn->busno << 16) |
		(pdn->devfn << 8) | (where & 0xff);
	buid = pdn->phb->buid;
	if (buid) {
		/* Bridges with a BUID need the ibm, variant of the call */
		ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr,
			BUID_HI(buid), BUID_LO(buid), size, (ulong) val);
	} else {
		ret = rtas_call(write_pci_config, 3, 1, NULL, addr, size, (ulong)val);
	}

	if (ret)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return PCIBIOS_SUCCESSFUL;
}
154
155static int rtas_pci_write_config(struct pci_bus *bus,
156 unsigned int devfn,
157 int where, int size, u32 val)
158{
159 struct device_node *busdn, *dn;
160
161 if (bus->self)
162 busdn = pci_device_to_OF_node(bus->self);
163 else
164 busdn = bus->sysdata; /* must be a phb */
165
166 /* Search only direct children of the bus */
167 for (dn = busdn->child; dn; dn = dn->sibling) {
168 struct pci_dn *pdn = PCI_DN(dn);
169 if (pdn && pdn->devfn == devfn
170 && of_device_available(dn))
171 return rtas_write_config(pdn, where, size, val);
172 }
173 return PCIBIOS_DEVICE_NOT_FOUND;
174}
175
176struct pci_ops rtas_pci_ops = {
177 rtas_pci_read_config,
178 rtas_pci_write_config
179};
180
/* Identify IBM "Python" PCI host bridges by their "model" property. */
int is_python(struct device_node *dev)
{
	char *model = (char *)get_property(dev, "model", NULL);

	return (model != NULL) && (strstr(model, "Python") != NULL);
}
190
/*
 * Fetch a PHB's "reg" property into a 64-bit reg_property, widening
 * from the 32-bit layout when the root has a single address cell
 * (@addr_size_words == 1).  Returns 0 on success, 1 if the property
 * is missing.
 */
static int get_phb_reg_prop(struct device_node *dev,
			    unsigned int addr_size_words,
			    struct reg_property64 *reg)
{
	unsigned int *ui_ptr = NULL, len;

	/* Found a PHB, now figure out where his registers are mapped. */
	ui_ptr = (unsigned int *)get_property(dev, "reg", &len);
	if (ui_ptr == NULL)
		return 1;

	if (addr_size_words == 1) {
		/* 32-bit address/size pair: widen each field separately */
		reg->address = ((struct reg_property32 *)ui_ptr)->address;
		reg->size = ((struct reg_property32 *)ui_ptr)->size;
	} else {
		*reg = *((struct reg_property64 *)ui_ptr);
	}

	return 0;
}
211
212static void python_countermeasures(struct device_node *dev,
213 unsigned int addr_size_words)
214{
215 struct reg_property64 reg_struct;
216 void __iomem *chip_regs;
217 volatile u32 val;
218
219 if (get_phb_reg_prop(dev, addr_size_words, &reg_struct))
220 return;
221
222 /* Python's register file is 1 MB in size. */
223 chip_regs = ioremap(reg_struct.address & ~(0xfffffUL), 0x100000);
224
225 /*
226 * Firmware doesn't always clear this bit which is critical
227 * for good performance - Anton
228 */
229
230#define PRG_CL_RESET_VALID 0x00010000
231
232 val = in_be32(chip_regs + 0xf6030);
233 if (val & PRG_CL_RESET_VALID) {
234 printk(KERN_INFO "Python workaround: ");
235 val &= ~PRG_CL_RESET_VALID;
236 out_be32(chip_regs + 0xf6030, val);
237 /*
238 * We must read it back for changes to
239 * take effect
240 */
241 val = in_be32(chip_regs + 0xf6030);
242 printk("reg0: %x\n", val);
243 }
244
245 iounmap(chip_regs);
246}
247
/*
 * Look up the RTAS tokens used for PCI config-space access.  A token
 * for a call the firmware doesn't implement comes back as -1 (see the
 * ibm_read_pci_config check in get_phb_buid()).
 */
void __init init_pci_config_tokens (void)
{
	read_pci_config = rtas_token("read-pci-config");
	write_pci_config = rtas_token("write-pci-config");
	ibm_read_pci_config = rtas_token("ibm,read-pci-config");
	ibm_write_pci_config = rtas_token("ibm,write-pci-config");
}
255
256unsigned long __devinit get_phb_buid (struct device_node *phb)
257{
258 int addr_cells;
259 unsigned int *buid_vals;
260 unsigned int len;
261 unsigned long buid;
262
263 if (ibm_read_pci_config == -1) return 0;
264
265 /* PHB's will always be children of the root node,
266 * or so it is promised by the current firmware. */
267 if (phb->parent == NULL)
268 return 0;
269 if (phb->parent->parent)
270 return 0;
271
272 buid_vals = (unsigned int *) get_property(phb, "reg", &len);
273 if (buid_vals == NULL)
274 return 0;
275
276 addr_cells = prom_n_addr_cells(phb);
277 if (addr_cells == 1) {
278 buid = (unsigned long) buid_vals[0];
279 } else {
280 buid = (((unsigned long)buid_vals[0]) << 32UL) |
281 (((unsigned long)buid_vals[1]) & 0xffffffff);
282 }
283 return buid;
284}
285
286static int phb_set_bus_ranges(struct device_node *dev,
287 struct pci_controller *phb)
288{
289 int *bus_range;
290 unsigned int len;
291
292 bus_range = (int *) get_property(dev, "bus-range", &len);
293 if (bus_range == NULL || len < 2 * sizeof(int)) {
294 return 1;
295 }
296
297 phb->first_busno = bus_range[0];
298 phb->last_busno = bus_range[1];
299
300 return 0;
301}
302
/*
 * Common initialization shared by the boot-time (alloc_phb) and
 * hotplug (alloc_phb_dynamic) paths: run pci_setup_pci_controller(),
 * apply Python chip workarounds if needed, and fill in the bus range,
 * config ops and BUID.  Returns 0 on success, 1 when the "bus-range"
 * property is unusable.
 */
static int __devinit setup_phb(struct device_node *dev,
			       struct pci_controller *phb,
			       unsigned int addr_size_words)
{
	pci_setup_pci_controller(phb);

	if (is_python(dev))
		python_countermeasures(dev, addr_size_words);

	if (phb_set_bus_ranges(dev, phb))
		return 1;

	phb->arch_data = dev;
	phb->ops = &rtas_pci_ops;
	phb->buid = get_phb_buid(dev);

	return 0;
}
321
/*
 * Attach a "linux,pci-domain" property carrying phb->global_number to
 * the PHB's device node.  @of_prop must have been allocated with room
 * for the value immediately after the struct property itself, because
 * of_prop->value points at &of_prop[1].
 */
static void __devinit add_linux_pci_domain(struct device_node *dev,
					   struct pci_controller *phb,
					   struct property *of_prop)
{
	memset(of_prop, 0, sizeof(struct property));
	of_prop->name = "linux,pci-domain";
	of_prop->length = sizeof(phb->global_number);
	/* The value lives in the same allocation, right after the
	 * property header */
	of_prop->value = (unsigned char *)&of_prop[1];
	memcpy(of_prop->value, &phb->global_number, sizeof(phb->global_number));
	prom_add_property(dev, of_prop);
}
333
/*
 * Boot-time PHB allocation from bootmem; also publishes the
 * "linux,pci-domain" property.  Returns NULL on any failure.
 * NOTE(review): the bootmem allocations are not released on the
 * failure paths -- likely acceptable for one-shot __init code, but
 * worth confirming.
 */
static struct pci_controller * __init alloc_phb(struct device_node *dev,
						unsigned int addr_size_words)
{
	struct pci_controller *phb;
	struct property *of_prop;

	phb = alloc_bootmem(sizeof(struct pci_controller));
	if (phb == NULL)
		return NULL;

	/* One allocation covers the property header plus its value
	 * (see add_linux_pci_domain()) */
	of_prop = alloc_bootmem(sizeof(struct property) +
				sizeof(phb->global_number));
	if (!of_prop)
		return NULL;

	if (setup_phb(dev, phb, addr_size_words))
		return NULL;

	add_linux_pci_domain(dev, phb, of_prop);

	return phb;
}
356
/*
 * Hotplug-time PHB allocation (kmalloc rather than bootmem).
 * NOTE(review): phb is leaked if setup_phb() fails; simply kfree()ing
 * here may be unsafe if pci_setup_pci_controller() has already linked
 * the controller into a global list -- confirm before fixing.
 */
static struct pci_controller * __devinit alloc_phb_dynamic(struct device_node *dev, unsigned int addr_size_words)
{
	struct pci_controller *phb;

	phb = (struct pci_controller *)kmalloc(sizeof(struct pci_controller),
					       GFP_KERNEL);
	if (phb == NULL)
		return NULL;

	if (setup_phb(dev, phb, addr_size_words))
		return NULL;

	/* Tells pcibios_remove_root_bus() this controller is kfree()able */
	phb->is_dynamic = 1;

	/* TODO: linux,pci-domain? */

	return phb;
}
375
/*
 * Boot-time scan: walk the children of the device-tree root, set up a
 * pci_controller for every "pci" node, wire MPIC interrupt source
 * units on pSeries, and honour the "linux,pci-probe-only" and
 * "linux,pci-assign-all-buses" properties under /chosen.
 * Always returns 0.
 */
unsigned long __init find_and_init_phbs(void)
{
	struct device_node *node;
	struct pci_controller *phb;
	unsigned int root_size_cells = 0;
	unsigned int index;
	unsigned int *opprop = NULL;
	struct device_node *root = of_find_node_by_path("/");

	/* The open-pic property is only needed for the ISU lookup below */
	if (ppc64_interrupt_controller == IC_OPEN_PIC) {
		opprop = (unsigned int *)get_property(root,
				"platform-open-pic", NULL);
	}

	root_size_cells = prom_n_size_cells(root);

	index = 0;

	for (node = of_get_next_child(root, NULL);
	     node != NULL;
	     node = of_get_next_child(root, node)) {
		if (node->type == NULL || strcmp(node->type, "pci") != 0)
			continue;

		phb = alloc_phb(node, root_size_cells);
		if (!phb)
			continue;

		/* Only the first PHB found gets the primary IO space */
		pci_process_bridge_OF_ranges(phb, node, 0);
		pci_setup_phb_io(phb, index == 0);
#ifdef CONFIG_PPC_PSERIES
		if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) {
			/* Index into platform-open-pic for this PHB's ISU
			 * address; assumes the property's cell layout --
			 * TODO confirm against the firmware binding */
			int addr = root_size_cells * (index + 2) - 1;
			mpic_assign_isu(pSeries_mpic, index, opprop[addr]);
		}
#endif
		index++;
	}

	of_node_put(root);
	pci_devs_phb_init();

	/*
	 * pci_probe_only and pci_assign_all_buses can be set via properties
	 * in chosen.
	 */
	if (of_chosen) {
		int *prop;

		prop = (int *)get_property(of_chosen, "linux,pci-probe-only",
					   NULL);
		if (prop)
			pci_probe_only = *prop;

		prop = (int *)get_property(of_chosen,
					   "linux,pci-assign-all-buses", NULL);
		if (prop)
			pci_assign_all_buses = *prop;
	}

	return 0;
}
438
/*
 * Create and fully initialize a PHB for device node @dn at hotplug
 * time: allocate the controller, map its ranges and IO space, set up
 * its device nodes and scan the bus.  The new PHB is the "primary"
 * one when no other host bridge is registered yet.  Returns the
 * controller, or NULL on allocation/setup failure.
 */
struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
{
	struct device_node *root = of_find_node_by_path("/");
	unsigned int root_size_cells = 0;
	struct pci_controller *phb;
	int primary;

	root_size_cells = prom_n_size_cells(root);

	/* Primary iff no host bridge is registered yet */
	primary = list_empty(&hose_list);
	phb = alloc_phb_dynamic(dn, root_size_cells);
	if (!phb)
		return NULL;

	pci_process_bridge_OF_ranges(phb, dn, primary);

	pci_setup_phb_io_dynamic(phb, primary);
	of_node_put(root);

	pci_devs_phb_init_dynamic(phb);
	scan_phb(phb);

	return phb;
}
EXPORT_SYMBOL(init_phb_dynamic);
464
465/* RPA-specific bits for removing PHBs */
466int pcibios_remove_root_bus(struct pci_controller *phb)
467{
468 struct pci_bus *b = phb->bus;
469 struct resource *res;
470 int rc, i;
471
472 res = b->resource[0];
473 if (!res->flags) {
474 printk(KERN_ERR "%s: no IO resource for PHB %s\n", __FUNCTION__,
475 b->name);
476 return 1;
477 }
478
479 rc = unmap_bus_range(b);
480 if (rc) {
481 printk(KERN_ERR "%s: failed to unmap IO on bus %s\n",
482 __FUNCTION__, b->name);
483 return 1;
484 }
485
486 if (release_resource(res)) {
487 printk(KERN_ERR "%s: failed to release IO on bus %s\n",
488 __FUNCTION__, b->name);
489 return 1;
490 }
491
492 for (i = 1; i < 3; ++i) {
493 res = b->resource[i];
494 if (!res->flags && i == 0) {
495 printk(KERN_ERR "%s: no MEM resource for PHB %s\n",
496 __FUNCTION__, b->name);
497 return 1;
498 }
499 if (res->flags && release_resource(res)) {
500 printk(KERN_ERR
501 "%s: failed to release IO %d on bus %s\n",
502 __FUNCTION__, i, b->name);
503 return 1;
504 }
505 }
506
507 list_del(&phb->list_node);
508 if (phb->is_dynamic)
509 kfree(phb);
510
511 return 0;
512}
513EXPORT_SYMBOL(pcibios_remove_root_bus);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index e22856ecb5a0..bae4bff138f1 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -33,6 +33,7 @@
33#include <asm/io.h> 33#include <asm/io.h>
34#include <asm/prom.h> 34#include <asm/prom.h>
35#include <asm/processor.h> 35#include <asm/processor.h>
36#include <asm/systemcfg.h>
36#include <asm/pgtable.h> 37#include <asm/pgtable.h>
37#include <asm/smp.h> 38#include <asm/smp.h>
38#include <asm/elf.h> 39#include <asm/elf.h>
@@ -51,6 +52,9 @@
51#include <asm/page.h> 52#include <asm/page.h>
52#include <asm/mmu.h> 53#include <asm/mmu.h>
53#include <asm/lmb.h> 54#include <asm/lmb.h>
55#include <asm/xmon.h>
56
57#include "setup.h"
54 58
55#undef DEBUG 59#undef DEBUG
56 60
@@ -60,6 +64,13 @@
60#define DBG(fmt...) 64#define DBG(fmt...)
61#endif 65#endif
62 66
67#ifdef CONFIG_PPC_MULTIPLATFORM
68int _machine = 0;
69EXPORT_SYMBOL(_machine);
70#endif
71
72unsigned long klimit = (unsigned long) _end;
73
63/* 74/*
64 * This still seems to be needed... -- paulus 75 * This still seems to be needed... -- paulus
65 */ 76 */
@@ -510,8 +521,8 @@ void __init smp_setup_cpu_maps(void)
510 * On pSeries LPAR, we need to know how many cpus 521 * On pSeries LPAR, we need to know how many cpus
511 * could possibly be added to this partition. 522 * could possibly be added to this partition.
512 */ 523 */
513 if (systemcfg->platform == PLATFORM_PSERIES_LPAR && 524 if (_machine == PLATFORM_PSERIES_LPAR &&
514 (dn = of_find_node_by_path("/rtas"))) { 525 (dn = of_find_node_by_path("/rtas"))) {
515 int num_addr_cell, num_size_cell, maxcpus; 526 int num_addr_cell, num_size_cell, maxcpus;
516 unsigned int *ireg; 527 unsigned int *ireg;
517 528
@@ -555,7 +566,27 @@ void __init smp_setup_cpu_maps(void)
555 cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]); 566 cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
556 } 567 }
557 568
558 systemcfg->processorCount = num_present_cpus(); 569 _systemcfg->processorCount = num_present_cpus();
559#endif /* CONFIG_PPC64 */ 570#endif /* CONFIG_PPC64 */
560} 571}
561#endif /* CONFIG_SMP */ 572#endif /* CONFIG_SMP */
573
#ifdef CONFIG_XMON
/*
 * Handle the "xmon" early boot parameter: "xmon=on" enables the
 * debugger, "xmon=off" disables it, and "xmon=early" -- or no
 * argument at all -- enables it and drops into it immediately.
 */
static int __init early_xmon(char *p)
{
	/* ensure xmon is enabled */
	if (p) {
		if (strncmp(p, "on", 2) == 0)
			xmon_init(1);
		if (strncmp(p, "off", 3) == 0)
			xmon_init(0);
		/* any value other than "early" stops here */
		if (strncmp(p, "early", 5) != 0)
			return 0;
	}
	xmon_init(1);
	debugger(NULL);

	return 0;
}
early_param("xmon", early_xmon);
#endif
diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
new file mode 100644
index 000000000000..2ebba755272e
--- /dev/null
+++ b/arch/powerpc/kernel/setup.h
@@ -0,0 +1,6 @@
#ifndef _POWERPC_KERNEL_SETUP_H
#define _POWERPC_KERNEL_SETUP_H

/* Shared declarations for the 32-bit and 64-bit setup code.
 * check_for_initrd(): presumably probes for a boot-loader supplied
 * initrd -- definition not visible here; confirm in setup-common.c. */
void check_for_initrd(void);

#endif /* _POWERPC_KERNEL_SETUP_H */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 3af2631e3fab..c98cfcc9cd9a 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -40,6 +40,8 @@
40#include <asm/xmon.h> 40#include <asm/xmon.h>
41#include <asm/time.h> 41#include <asm/time.h>
42 42
43#include "setup.h"
44
43#define DBG(fmt...) 45#define DBG(fmt...)
44 46
45#if defined CONFIG_KGDB 47#if defined CONFIG_KGDB
@@ -70,8 +72,6 @@ unsigned int DMA_MODE_WRITE;
70int have_of = 1; 72int have_of = 1;
71 73
72#ifdef CONFIG_PPC_MULTIPLATFORM 74#ifdef CONFIG_PPC_MULTIPLATFORM
73int _machine = 0;
74
75extern void prep_init(void); 75extern void prep_init(void);
76extern void pmac_init(void); 76extern void pmac_init(void);
77extern void chrp_init(void); 77extern void chrp_init(void);
@@ -279,7 +279,6 @@ arch_initcall(ppc_init);
279/* Warning, IO base is not yet inited */ 279/* Warning, IO base is not yet inited */
280void __init setup_arch(char **cmdline_p) 280void __init setup_arch(char **cmdline_p)
281{ 281{
282 extern char *klimit;
283 extern void do_init_bootmem(void); 282 extern void do_init_bootmem(void);
284 283
285 /* so udelay does something sensible, assume <= 1000 bogomips */ 284 /* so udelay does something sensible, assume <= 1000 bogomips */
@@ -303,14 +302,9 @@ void __init setup_arch(char **cmdline_p)
303 pmac_feature_init(); /* New cool way */ 302 pmac_feature_init(); /* New cool way */
304#endif 303#endif
305 304
306#ifdef CONFIG_XMON 305#ifdef CONFIG_XMON_DEFAULT
307 xmon_map_scc(); 306 xmon_init(1);
308 if (strstr(cmd_line, "xmon")) { 307#endif
309 xmon_init(1);
310 debugger(NULL);
311 }
312#endif /* CONFIG_XMON */
313 if ( ppc_md.progress ) ppc_md.progress("setup_arch: enter", 0x3eab);
314 308
315#if defined(CONFIG_KGDB) 309#if defined(CONFIG_KGDB)
316 if (ppc_md.kgdb_map_scc) 310 if (ppc_md.kgdb_map_scc)
@@ -343,7 +337,7 @@ void __init setup_arch(char **cmdline_p)
343 init_mm.start_code = PAGE_OFFSET; 337 init_mm.start_code = PAGE_OFFSET;
344 init_mm.end_code = (unsigned long) _etext; 338 init_mm.end_code = (unsigned long) _etext;
345 init_mm.end_data = (unsigned long) _edata; 339 init_mm.end_data = (unsigned long) _edata;
346 init_mm.brk = (unsigned long) klimit; 340 init_mm.brk = klimit;
347 341
348 /* Save unparsed command line copy for /proc/cmdline */ 342 /* Save unparsed command line copy for /proc/cmdline */
349 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE); 343 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 0471e843b6c5..6791668213e7 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -61,6 +61,8 @@
61#include <asm/xmon.h> 61#include <asm/xmon.h>
62#include <asm/udbg.h> 62#include <asm/udbg.h>
63 63
64#include "setup.h"
65
64#ifdef DEBUG 66#ifdef DEBUG
65#define DBG(fmt...) udbg_printf(fmt) 67#define DBG(fmt...) udbg_printf(fmt)
66#else 68#else
@@ -94,15 +96,6 @@ extern void udbg_init_maple_realmode(void);
94 do { udbg_putc = call_rtas_display_status_delay; } while(0) 96 do { udbg_putc = call_rtas_display_status_delay; } while(0)
95#endif 97#endif
96 98
97/* extern void *stab; */
98extern unsigned long klimit;
99
100extern void mm_init_ppc64(void);
101extern void stab_initialize(unsigned long stab);
102extern void htab_initialize(void);
103extern void early_init_devtree(void *flat_dt);
104extern void unflatten_device_tree(void);
105
106int have_of = 1; 99int have_of = 1;
107int boot_cpuid = 0; 100int boot_cpuid = 0;
108int boot_cpuid_phys = 0; 101int boot_cpuid_phys = 0;
@@ -254,11 +247,10 @@ void __init early_setup(unsigned long dt_ptr)
254 * Iterate all ppc_md structures until we find the proper 247 * Iterate all ppc_md structures until we find the proper
255 * one for the current machine type 248 * one for the current machine type
256 */ 249 */
257 DBG("Probing machine type for platform %x...\n", 250 DBG("Probing machine type for platform %x...\n", _machine);
258 systemcfg->platform);
259 251
260 for (mach = machines; *mach; mach++) { 252 for (mach = machines; *mach; mach++) {
261 if ((*mach)->probe(systemcfg->platform)) 253 if ((*mach)->probe(_machine))
262 break; 254 break;
263 } 255 }
264 /* What can we do if we didn't find ? */ 256 /* What can we do if we didn't find ? */
@@ -290,6 +282,28 @@ void __init early_setup(unsigned long dt_ptr)
290 DBG(" <- early_setup()\n"); 282 DBG(" <- early_setup()\n");
291} 283}
292 284
285#ifdef CONFIG_SMP
286void early_setup_secondary(void)
287{
288 struct paca_struct *lpaca = get_paca();
289
290 /* Mark enabled in PACA */
291 lpaca->proc_enabled = 0;
292
293 /* Initialize hash table for that CPU */
294 htab_initialize_secondary();
295
296 /* Initialize STAB/SLB. We use a virtual address as it works
297 * in real mode on pSeries and we want a virutal address on
298 * iSeries anyway
299 */
300 if (cpu_has_feature(CPU_FTR_SLB))
301 slb_initialize();
302 else
303 stab_initialize(lpaca->stab_addr);
304}
305
306#endif /* CONFIG_SMP */
293 307
294#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) 308#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
295void smp_release_cpus(void) 309void smp_release_cpus(void)
@@ -315,7 +329,8 @@ void smp_release_cpus(void)
315#endif /* CONFIG_SMP || CONFIG_KEXEC */ 329#endif /* CONFIG_SMP || CONFIG_KEXEC */
316 330
317/* 331/*
318 * Initialize some remaining members of the ppc64_caches and systemcfg structures 332 * Initialize some remaining members of the ppc64_caches and systemcfg
333 * structures
319 * (at least until we get rid of them completely). This is mostly some 334 * (at least until we get rid of them completely). This is mostly some
320 * cache informations about the CPU that will be used by cache flush 335 * cache informations about the CPU that will be used by cache flush
321 * routines and/or provided to userland 336 * routines and/or provided to userland
@@ -340,7 +355,7 @@ static void __init initialize_cache_info(void)
340 const char *dc, *ic; 355 const char *dc, *ic;
341 356
342 /* Then read cache informations */ 357 /* Then read cache informations */
343 if (systemcfg->platform == PLATFORM_POWERMAC) { 358 if (_machine == PLATFORM_POWERMAC) {
344 dc = "d-cache-block-size"; 359 dc = "d-cache-block-size";
345 ic = "i-cache-block-size"; 360 ic = "i-cache-block-size";
346 } else { 361 } else {
@@ -360,8 +375,8 @@ static void __init initialize_cache_info(void)
360 DBG("Argh, can't find dcache properties ! " 375 DBG("Argh, can't find dcache properties ! "
361 "sizep: %p, lsizep: %p\n", sizep, lsizep); 376 "sizep: %p, lsizep: %p\n", sizep, lsizep);
362 377
363 systemcfg->dcache_size = ppc64_caches.dsize = size; 378 _systemcfg->dcache_size = ppc64_caches.dsize = size;
364 systemcfg->dcache_line_size = 379 _systemcfg->dcache_line_size =
365 ppc64_caches.dline_size = lsize; 380 ppc64_caches.dline_size = lsize;
366 ppc64_caches.log_dline_size = __ilog2(lsize); 381 ppc64_caches.log_dline_size = __ilog2(lsize);
367 ppc64_caches.dlines_per_page = PAGE_SIZE / lsize; 382 ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;
@@ -378,8 +393,8 @@ static void __init initialize_cache_info(void)
378 DBG("Argh, can't find icache properties ! " 393 DBG("Argh, can't find icache properties ! "
379 "sizep: %p, lsizep: %p\n", sizep, lsizep); 394 "sizep: %p, lsizep: %p\n", sizep, lsizep);
380 395
381 systemcfg->icache_size = ppc64_caches.isize = size; 396 _systemcfg->icache_size = ppc64_caches.isize = size;
382 systemcfg->icache_line_size = 397 _systemcfg->icache_line_size =
383 ppc64_caches.iline_size = lsize; 398 ppc64_caches.iline_size = lsize;
384 ppc64_caches.log_iline_size = __ilog2(lsize); 399 ppc64_caches.log_iline_size = __ilog2(lsize);
385 ppc64_caches.ilines_per_page = PAGE_SIZE / lsize; 400 ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
@@ -387,10 +402,12 @@ static void __init initialize_cache_info(void)
387 } 402 }
388 403
389 /* Add an eye catcher and the systemcfg layout version number */ 404 /* Add an eye catcher and the systemcfg layout version number */
390 strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64"); 405 strcpy(_systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
391 systemcfg->version.major = SYSTEMCFG_MAJOR; 406 _systemcfg->version.major = SYSTEMCFG_MAJOR;
392 systemcfg->version.minor = SYSTEMCFG_MINOR; 407 _systemcfg->version.minor = SYSTEMCFG_MINOR;
393 systemcfg->processor = mfspr(SPRN_PVR); 408 _systemcfg->processor = mfspr(SPRN_PVR);
409 _systemcfg->platform = _machine;
410 _systemcfg->physicalMemorySize = lmb_phys_mem_size();
394 411
395 DBG(" <- initialize_cache_info()\n"); 412 DBG(" <- initialize_cache_info()\n");
396} 413}
@@ -479,10 +496,10 @@ void __init setup_system(void)
479 printk("-----------------------------------------------------\n"); 496 printk("-----------------------------------------------------\n");
480 printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size); 497 printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
481 printk("ppc64_interrupt_controller = 0x%ld\n", ppc64_interrupt_controller); 498 printk("ppc64_interrupt_controller = 0x%ld\n", ppc64_interrupt_controller);
482 printk("systemcfg = 0x%p\n", systemcfg); 499 printk("systemcfg = 0x%p\n", _systemcfg);
483 printk("systemcfg->platform = 0x%x\n", systemcfg->platform); 500 printk("systemcfg->platform = 0x%x\n", _systemcfg->platform);
484 printk("systemcfg->processorCount = 0x%lx\n", systemcfg->processorCount); 501 printk("systemcfg->processorCount = 0x%lx\n", _systemcfg->processorCount);
485 printk("systemcfg->physicalMemorySize = 0x%lx\n", systemcfg->physicalMemorySize); 502 printk("systemcfg->physicalMemorySize = 0x%lx\n", _systemcfg->physicalMemorySize);
486 printk("ppc64_caches.dcache_line_size = 0x%x\n", 503 printk("ppc64_caches.dcache_line_size = 0x%x\n",
487 ppc64_caches.dline_size); 504 ppc64_caches.dline_size);
488 printk("ppc64_caches.icache_line_size = 0x%x\n", 505 printk("ppc64_caches.icache_line_size = 0x%x\n",
@@ -564,12 +581,12 @@ void __init setup_syscall_map(void)
564 for (i = 0; i < __NR_syscalls; i++) { 581 for (i = 0; i < __NR_syscalls; i++) {
565 if (sys_call_table[i*2] != sys_ni_syscall) { 582 if (sys_call_table[i*2] != sys_ni_syscall) {
566 count64++; 583 count64++;
567 systemcfg->syscall_map_64[i >> 5] |= 584 _systemcfg->syscall_map_64[i >> 5] |=
568 0x80000000UL >> (i & 0x1f); 585 0x80000000UL >> (i & 0x1f);
569 } 586 }
570 if (sys_call_table[i*2+1] != sys_ni_syscall) { 587 if (sys_call_table[i*2+1] != sys_ni_syscall) {
571 count32++; 588 count32++;
572 systemcfg->syscall_map_32[i >> 5] |= 589 _systemcfg->syscall_map_32[i >> 5] |=
573 0x80000000UL >> (i & 0x1f); 590 0x80000000UL >> (i & 0x1f);
574 } 591 }
575 } 592 }
@@ -858,26 +875,6 @@ int check_legacy_ioport(unsigned long base_port)
858} 875}
859EXPORT_SYMBOL(check_legacy_ioport); 876EXPORT_SYMBOL(check_legacy_ioport);
860 877
861#ifdef CONFIG_XMON
862static int __init early_xmon(char *p)
863{
864 /* ensure xmon is enabled */
865 if (p) {
866 if (strncmp(p, "on", 2) == 0)
867 xmon_init(1);
868 if (strncmp(p, "off", 3) == 0)
869 xmon_init(0);
870 if (strncmp(p, "early", 5) != 0)
871 return 0;
872 }
873 xmon_init(1);
874 debugger(NULL);
875
876 return 0;
877}
878early_param("xmon", early_xmon);
879#endif
880
881void cpu_die(void) 878void cpu_die(void)
882{ 879{
883 if (ppc_md.cpu_die) 880 if (ppc_md.cpu_die)
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 081d931eae48..a7c4515f320f 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -42,6 +42,7 @@
42 42
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
44#include <asm/cacheflush.h> 44#include <asm/cacheflush.h>
45#include <asm/sigcontext.h>
45#ifdef CONFIG_PPC64 46#ifdef CONFIG_PPC64
46#include "ppc32.h" 47#include "ppc32.h"
47#include <asm/unistd.h> 48#include <asm/unistd.h>
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 36d67a8d7cbb..e28a139c29d0 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -44,6 +44,7 @@
44#include <asm/cputable.h> 44#include <asm/cputable.h>
45#include <asm/system.h> 45#include <asm/system.h>
46#include <asm/mpic.h> 46#include <asm/mpic.h>
47#include <asm/systemcfg.h>
47#ifdef CONFIG_PPC64 48#ifdef CONFIG_PPC64
48#include <asm/paca.h> 49#include <asm/paca.h>
49#endif 50#endif
@@ -368,9 +369,11 @@ int generic_cpu_disable(void)
368 if (cpu == boot_cpuid) 369 if (cpu == boot_cpuid)
369 return -EBUSY; 370 return -EBUSY;
370 371
371 systemcfg->processorCount--;
372 cpu_clear(cpu, cpu_online_map); 372 cpu_clear(cpu, cpu_online_map);
373#ifdef CONFIG_PPC64
374 _systemcfg->processorCount--;
373 fixup_irqs(cpu_online_map); 375 fixup_irqs(cpu_online_map);
376#endif
374 return 0; 377 return 0;
375} 378}
376 379
@@ -388,9 +391,11 @@ int generic_cpu_enable(unsigned int cpu)
388 while (!cpu_online(cpu)) 391 while (!cpu_online(cpu))
389 cpu_relax(); 392 cpu_relax();
390 393
394#ifdef CONFIG_PPC64
391 fixup_irqs(cpu_online_map); 395 fixup_irqs(cpu_online_map);
392 /* counter the irq disable in fixup_irqs */ 396 /* counter the irq disable in fixup_irqs */
393 local_irq_enable(); 397 local_irq_enable();
398#endif
394 return 0; 399 return 0;
395} 400}
396 401
@@ -419,7 +424,9 @@ void generic_mach_cpu_die(void)
419 while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) 424 while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
420 cpu_relax(); 425 cpu_relax();
421 426
427#ifdef CONFIG_PPC64
422 flush_tlb_pending(); 428 flush_tlb_pending();
429#endif
423 cpu_set(cpu, cpu_online_map); 430 cpu_set(cpu, cpu_online_map);
424 local_irq_enable(); 431 local_irq_enable();
425} 432}
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index a8210ed5c686..9c921d1c4084 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -52,7 +52,6 @@
52#include <asm/semaphore.h> 52#include <asm/semaphore.h>
53#include <asm/time.h> 53#include <asm/time.h>
54#include <asm/mmu_context.h> 54#include <asm/mmu_context.h>
55#include <asm/systemcfg.h>
56#include <asm/ppc-pci.h> 55#include <asm/ppc-pci.h>
57 56
58/* readdir & getdents */ 57/* readdir & getdents */
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
new file mode 100644
index 000000000000..850af198fb5f
--- /dev/null
+++ b/arch/powerpc/kernel/sysfs.c
@@ -0,0 +1,384 @@
1#include <linux/config.h>
2#include <linux/sysdev.h>
3#include <linux/cpu.h>
4#include <linux/smp.h>
5#include <linux/percpu.h>
6#include <linux/init.h>
7#include <linux/sched.h>
8#include <linux/module.h>
9#include <linux/nodemask.h>
10#include <linux/cpumask.h>
11#include <linux/notifier.h>
12
13#include <asm/current.h>
14#include <asm/processor.h>
15#include <asm/cputable.h>
16#include <asm/firmware.h>
17#include <asm/hvcall.h>
18#include <asm/prom.h>
19#include <asm/systemcfg.h>
20#include <asm/paca.h>
21#include <asm/lppaca.h>
22#include <asm/machdep.h>
23#include <asm/smp.h>
24
25static DEFINE_PER_CPU(struct cpu, cpu_devices);
26
27/* SMT stuff */
28
29#ifdef CONFIG_PPC_MULTIPLATFORM
30/* default to snooze disabled */
31DEFINE_PER_CPU(unsigned long, smt_snooze_delay);
32
33static ssize_t store_smt_snooze_delay(struct sys_device *dev, const char *buf,
34 size_t count)
35{
36 struct cpu *cpu = container_of(dev, struct cpu, sysdev);
37 ssize_t ret;
38 unsigned long snooze;
39
40 ret = sscanf(buf, "%lu", &snooze);
41 if (ret != 1)
42 return -EINVAL;
43
44 per_cpu(smt_snooze_delay, cpu->sysdev.id) = snooze;
45
46 return count;
47}
48
49static ssize_t show_smt_snooze_delay(struct sys_device *dev, char *buf)
50{
51 struct cpu *cpu = container_of(dev, struct cpu, sysdev);
52
53 return sprintf(buf, "%lu\n", per_cpu(smt_snooze_delay, cpu->sysdev.id));
54}
55
56static SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
57 store_smt_snooze_delay);
58
59/* Only parse OF options if the matching cmdline option was not specified */
60static int smt_snooze_cmdline;
61
62static int __init smt_setup(void)
63{
64 struct device_node *options;
65 unsigned int *val;
66 unsigned int cpu;
67
68 if (!cpu_has_feature(CPU_FTR_SMT))
69 return 1;
70
71 options = find_path_device("/options");
72 if (!options)
73 return 1;
74
75 val = (unsigned int *)get_property(options, "ibm,smt-snooze-delay",
76 NULL);
77 if (!smt_snooze_cmdline && val) {
78 for_each_cpu(cpu)
79 per_cpu(smt_snooze_delay, cpu) = *val;
80 }
81
82 return 1;
83}
84__initcall(smt_setup);
85
86static int __init setup_smt_snooze_delay(char *str)
87{
88 unsigned int cpu;
89 int snooze;
90
91 if (!cpu_has_feature(CPU_FTR_SMT))
92 return 1;
93
94 smt_snooze_cmdline = 1;
95
96 if (get_option(&str, &snooze)) {
97 for_each_cpu(cpu)
98 per_cpu(smt_snooze_delay, cpu) = snooze;
99 }
100
101 return 1;
102}
103__setup("smt-snooze-delay=", setup_smt_snooze_delay);
104
105#endif /* CONFIG_PPC_MULTIPLATFORM */
106
107/*
108 * Enabling PMCs will slow partition context switch times so we only do
109 * it the first time we write to the PMCs.
110 */
111
112static DEFINE_PER_CPU(char, pmcs_enabled);
113
114void ppc64_enable_pmcs(void)
115{
116 /* Only need to enable them once */
117 if (__get_cpu_var(pmcs_enabled))
118 return;
119
120 __get_cpu_var(pmcs_enabled) = 1;
121
122 if (ppc_md.enable_pmcs)
123 ppc_md.enable_pmcs();
124}
125EXPORT_SYMBOL(ppc64_enable_pmcs);
126
127/* XXX convert to rusty's on_one_cpu */
128static unsigned long run_on_cpu(unsigned long cpu,
129 unsigned long (*func)(unsigned long),
130 unsigned long arg)
131{
132 cpumask_t old_affinity = current->cpus_allowed;
133 unsigned long ret;
134
135 /* should return -EINVAL to userspace */
136 if (set_cpus_allowed(current, cpumask_of_cpu(cpu)))
137 return 0;
138
139 ret = func(arg);
140
141 set_cpus_allowed(current, old_affinity);
142
143 return ret;
144}
145
146#define SYSFS_PMCSETUP(NAME, ADDRESS) \
147static unsigned long read_##NAME(unsigned long junk) \
148{ \
149 return mfspr(ADDRESS); \
150} \
151static unsigned long write_##NAME(unsigned long val) \
152{ \
153 ppc64_enable_pmcs(); \
154 mtspr(ADDRESS, val); \
155 return 0; \
156} \
157static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
158{ \
159 struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
160 unsigned long val = run_on_cpu(cpu->sysdev.id, read_##NAME, 0); \
161 return sprintf(buf, "%lx\n", val); \
162} \
163static ssize_t __attribute_used__ \
164 store_##NAME(struct sys_device *dev, const char *buf, size_t count) \
165{ \
166 struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
167 unsigned long val; \
168 int ret = sscanf(buf, "%lx", &val); \
169 if (ret != 1) \
170 return -EINVAL; \
171 run_on_cpu(cpu->sysdev.id, write_##NAME, val); \
172 return count; \
173}
174
175SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
176SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
177SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
178SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
179SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
180SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
181SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
182SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
183SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
184SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
185SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
186SYSFS_PMCSETUP(purr, SPRN_PURR);
187
188static SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0);
189static SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1);
190static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
191static SYSDEV_ATTR(pmc1, 0600, show_pmc1, store_pmc1);
192static SYSDEV_ATTR(pmc2, 0600, show_pmc2, store_pmc2);
193static SYSDEV_ATTR(pmc3, 0600, show_pmc3, store_pmc3);
194static SYSDEV_ATTR(pmc4, 0600, show_pmc4, store_pmc4);
195static SYSDEV_ATTR(pmc5, 0600, show_pmc5, store_pmc5);
196static SYSDEV_ATTR(pmc6, 0600, show_pmc6, store_pmc6);
197static SYSDEV_ATTR(pmc7, 0600, show_pmc7, store_pmc7);
198static SYSDEV_ATTR(pmc8, 0600, show_pmc8, store_pmc8);
199static SYSDEV_ATTR(purr, 0600, show_purr, NULL);
200
201static void register_cpu_online(unsigned int cpu)
202{
203 struct cpu *c = &per_cpu(cpu_devices, cpu);
204 struct sys_device *s = &c->sysdev;
205
206#ifndef CONFIG_PPC_ISERIES
207 if (cpu_has_feature(CPU_FTR_SMT))
208 sysdev_create_file(s, &attr_smt_snooze_delay);
209#endif
210
211 /* PMC stuff */
212
213 sysdev_create_file(s, &attr_mmcr0);
214 sysdev_create_file(s, &attr_mmcr1);
215
216 if (cpu_has_feature(CPU_FTR_MMCRA))
217 sysdev_create_file(s, &attr_mmcra);
218
219 if (cur_cpu_spec->num_pmcs >= 1)
220 sysdev_create_file(s, &attr_pmc1);
221 if (cur_cpu_spec->num_pmcs >= 2)
222 sysdev_create_file(s, &attr_pmc2);
223 if (cur_cpu_spec->num_pmcs >= 3)
224 sysdev_create_file(s, &attr_pmc3);
225 if (cur_cpu_spec->num_pmcs >= 4)
226 sysdev_create_file(s, &attr_pmc4);
227 if (cur_cpu_spec->num_pmcs >= 5)
228 sysdev_create_file(s, &attr_pmc5);
229 if (cur_cpu_spec->num_pmcs >= 6)
230 sysdev_create_file(s, &attr_pmc6);
231 if (cur_cpu_spec->num_pmcs >= 7)
232 sysdev_create_file(s, &attr_pmc7);
233 if (cur_cpu_spec->num_pmcs >= 8)
234 sysdev_create_file(s, &attr_pmc8);
235
236 if (cpu_has_feature(CPU_FTR_SMT))
237 sysdev_create_file(s, &attr_purr);
238}
239
240#ifdef CONFIG_HOTPLUG_CPU
241static void unregister_cpu_online(unsigned int cpu)
242{
243 struct cpu *c = &per_cpu(cpu_devices, cpu);
244 struct sys_device *s = &c->sysdev;
245
246 BUG_ON(c->no_control);
247
248#ifndef CONFIG_PPC_ISERIES
249 if (cpu_has_feature(CPU_FTR_SMT))
250 sysdev_remove_file(s, &attr_smt_snooze_delay);
251#endif
252
253 /* PMC stuff */
254
255 sysdev_remove_file(s, &attr_mmcr0);
256 sysdev_remove_file(s, &attr_mmcr1);
257
258 if (cpu_has_feature(CPU_FTR_MMCRA))
259 sysdev_remove_file(s, &attr_mmcra);
260
261 if (cur_cpu_spec->num_pmcs >= 1)
262 sysdev_remove_file(s, &attr_pmc1);
263 if (cur_cpu_spec->num_pmcs >= 2)
264 sysdev_remove_file(s, &attr_pmc2);
265 if (cur_cpu_spec->num_pmcs >= 3)
266 sysdev_remove_file(s, &attr_pmc3);
267 if (cur_cpu_spec->num_pmcs >= 4)
268 sysdev_remove_file(s, &attr_pmc4);
269 if (cur_cpu_spec->num_pmcs >= 5)
270 sysdev_remove_file(s, &attr_pmc5);
271 if (cur_cpu_spec->num_pmcs >= 6)
272 sysdev_remove_file(s, &attr_pmc6);
273 if (cur_cpu_spec->num_pmcs >= 7)
274 sysdev_remove_file(s, &attr_pmc7);
275 if (cur_cpu_spec->num_pmcs >= 8)
276 sysdev_remove_file(s, &attr_pmc8);
277
278 if (cpu_has_feature(CPU_FTR_SMT))
279 sysdev_remove_file(s, &attr_purr);
280}
281#endif /* CONFIG_HOTPLUG_CPU */
282
283static int __devinit sysfs_cpu_notify(struct notifier_block *self,
284 unsigned long action, void *hcpu)
285{
286 unsigned int cpu = (unsigned int)(long)hcpu;
287
288 switch (action) {
289 case CPU_ONLINE:
290 register_cpu_online(cpu);
291 break;
292#ifdef CONFIG_HOTPLUG_CPU
293 case CPU_DEAD:
294 unregister_cpu_online(cpu);
295 break;
296#endif
297 }
298 return NOTIFY_OK;
299}
300
301static struct notifier_block __devinitdata sysfs_cpu_nb = {
302 .notifier_call = sysfs_cpu_notify,
303};
304
305/* NUMA stuff */
306
307#ifdef CONFIG_NUMA
308static struct node node_devices[MAX_NUMNODES];
309
310static void register_nodes(void)
311{
312 int i;
313
314 for (i = 0; i < MAX_NUMNODES; i++) {
315 if (node_online(i)) {
316 int p_node = parent_node(i);
317 struct node *parent = NULL;
318
319 if (p_node != i)
320 parent = &node_devices[p_node];
321
322 register_node(&node_devices[i], i, parent);
323 }
324 }
325}
326#else
327static void register_nodes(void)
328{
329 return;
330}
331#endif
332
333/* Only valid if CPU is present. */
334static ssize_t show_physical_id(struct sys_device *dev, char *buf)
335{
336 struct cpu *cpu = container_of(dev, struct cpu, sysdev);
337
338 return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->sysdev.id));
339}
340static SYSDEV_ATTR(physical_id, 0444, show_physical_id, NULL);
341
342static int __init topology_init(void)
343{
344 int cpu;
345 struct node *parent = NULL;
346
347 register_nodes();
348
349 register_cpu_notifier(&sysfs_cpu_nb);
350
351 for_each_cpu(cpu) {
352 struct cpu *c = &per_cpu(cpu_devices, cpu);
353
354#ifdef CONFIG_NUMA
355 /* The node to which a cpu belongs can't be known
356 * until the cpu is made present.
357 */
358 parent = NULL;
359 if (cpu_present(cpu))
360 parent = &node_devices[cpu_to_node(cpu)];
361#endif
362 /*
363 * For now, we just see if the system supports making
364 * the RTAS calls for CPU hotplug. But, there may be a
365 * more comprehensive way to do this for an individual
366 * CPU. For instance, the boot cpu might never be valid
367 * for hotplugging.
368 */
369 if (!ppc_md.cpu_die)
370 c->no_control = 1;
371
372 if (cpu_online(cpu) || (c->no_control == 0)) {
373 register_cpu(c, cpu, parent);
374
375 sysdev_create_file(&c->sysdev, &attr_physical_id);
376 }
377
378 if (cpu_online(cpu))
379 register_cpu_online(cpu);
380 }
381
382 return 0;
383}
384__initcall(topology_init);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index a6282b625b44..260b6ecd26a9 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -271,13 +271,13 @@ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
271 * tb_to_xs and stamp_xsec values are consistent. If not, then it 271 * tb_to_xs and stamp_xsec values are consistent. If not, then it
272 * loops back and reads them again until this criteria is met. 272 * loops back and reads them again until this criteria is met.
273 */ 273 */
274 ++(systemcfg->tb_update_count); 274 ++(_systemcfg->tb_update_count);
275 smp_wmb(); 275 smp_wmb();
276 systemcfg->tb_orig_stamp = new_tb_stamp; 276 _systemcfg->tb_orig_stamp = new_tb_stamp;
277 systemcfg->stamp_xsec = new_stamp_xsec; 277 _systemcfg->stamp_xsec = new_stamp_xsec;
278 systemcfg->tb_to_xs = new_tb_to_xs; 278 _systemcfg->tb_to_xs = new_tb_to_xs;
279 smp_wmb(); 279 smp_wmb();
280 ++(systemcfg->tb_update_count); 280 ++(_systemcfg->tb_update_count);
281#endif 281#endif
282} 282}
283 283
@@ -357,8 +357,9 @@ static void iSeries_tb_recal(void)
357 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec; 357 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
358 tb_to_xs = divres.result_low; 358 tb_to_xs = divres.result_low;
359 do_gtod.varp->tb_to_xs = tb_to_xs; 359 do_gtod.varp->tb_to_xs = tb_to_xs;
360 systemcfg->tb_ticks_per_sec = tb_ticks_per_sec; 360 _systemcfg->tb_ticks_per_sec =
361 systemcfg->tb_to_xs = tb_to_xs; 361 tb_ticks_per_sec;
362 _systemcfg->tb_to_xs = tb_to_xs;
362 } 363 }
363 else { 364 else {
364 printk( "Titan recalibrate: FAILED (difference > 4 percent)\n" 365 printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
@@ -483,6 +484,8 @@ void __init smp_space_timers(unsigned int max_cpus)
483 unsigned long offset = tb_ticks_per_jiffy / max_cpus; 484 unsigned long offset = tb_ticks_per_jiffy / max_cpus;
484 unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid); 485 unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);
485 486
487 /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
488 previous_tb -= tb_ticks_per_jiffy;
486 for_each_cpu(i) { 489 for_each_cpu(i) {
487 if (i != boot_cpuid) { 490 if (i != boot_cpuid) {
488 previous_tb += offset; 491 previous_tb += offset;
@@ -559,8 +562,8 @@ int do_settimeofday(struct timespec *tv)
559 update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs); 562 update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);
560 563
561#ifdef CONFIG_PPC64 564#ifdef CONFIG_PPC64
562 systemcfg->tz_minuteswest = sys_tz.tz_minuteswest; 565 _systemcfg->tz_minuteswest = sys_tz.tz_minuteswest;
563 systemcfg->tz_dsttime = sys_tz.tz_dsttime; 566 _systemcfg->tz_dsttime = sys_tz.tz_dsttime;
564#endif 567#endif
565 568
566 write_sequnlock_irqrestore(&xtime_lock, flags); 569 write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -711,11 +714,11 @@ void __init time_init(void)
711 do_gtod.varp->tb_to_xs = tb_to_xs; 714 do_gtod.varp->tb_to_xs = tb_to_xs;
712 do_gtod.tb_to_us = tb_to_us; 715 do_gtod.tb_to_us = tb_to_us;
713#ifdef CONFIG_PPC64 716#ifdef CONFIG_PPC64
714 systemcfg->tb_orig_stamp = tb_last_jiffy; 717 _systemcfg->tb_orig_stamp = tb_last_jiffy;
715 systemcfg->tb_update_count = 0; 718 _systemcfg->tb_update_count = 0;
716 systemcfg->tb_ticks_per_sec = tb_ticks_per_sec; 719 _systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
717 systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC; 720 _systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
718 systemcfg->tb_to_xs = tb_to_xs; 721 _systemcfg->tb_to_xs = tb_to_xs;
719#endif 722#endif
720 723
721 time_freq = 0; 724 time_freq = 0;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 0578f8387603..2020bb7648fb 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -129,7 +129,7 @@ int die(const char *str, struct pt_regs *regs, long err)
129 nl = 1; 129 nl = 1;
130#endif 130#endif
131#ifdef CONFIG_PPC64 131#ifdef CONFIG_PPC64
132 switch (systemcfg->platform) { 132 switch (_machine) {
133 case PLATFORM_PSERIES: 133 case PLATFORM_PSERIES:
134 printk("PSERIES "); 134 printk("PSERIES ");
135 nl = 1; 135 nl = 1;
diff --git a/arch/powerpc/lib/bitops.c b/arch/powerpc/lib/bitops.c
index b67ce3004ebf..f68ad71a0187 100644
--- a/arch/powerpc/lib/bitops.c
+++ b/arch/powerpc/lib/bitops.c
@@ -41,7 +41,7 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
41 tmp = *p; 41 tmp = *p;
42 42
43found_first: 43found_first:
44 tmp &= (~0UL >> (64 - size)); 44 tmp &= (~0UL >> (BITS_PER_LONG - size));
45 if (tmp == 0UL) /* Are any bits set? */ 45 if (tmp == 0UL) /* Are any bits set? */
46 return result + size; /* Nope. */ 46 return result + size; /* Nope. */
47found_middle: 47found_middle:
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 22e474876133..706e8a63ced9 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -84,10 +84,11 @@
84extern unsigned long dart_tablebase; 84extern unsigned long dart_tablebase;
85#endif /* CONFIG_U3_DART */ 85#endif /* CONFIG_U3_DART */
86 86
87static unsigned long _SDR1;
88struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
89
87hpte_t *htab_address; 90hpte_t *htab_address;
88unsigned long htab_hash_mask; 91unsigned long htab_hash_mask;
89unsigned long _SDR1;
90struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
91int mmu_linear_psize = MMU_PAGE_4K; 92int mmu_linear_psize = MMU_PAGE_4K;
92int mmu_virtual_psize = MMU_PAGE_4K; 93int mmu_virtual_psize = MMU_PAGE_4K;
93#ifdef CONFIG_HUGETLB_PAGE 94#ifdef CONFIG_HUGETLB_PAGE
@@ -165,7 +166,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
165 * normal insert callback here. 166 * normal insert callback here.
166 */ 167 */
167#ifdef CONFIG_PPC_ISERIES 168#ifdef CONFIG_PPC_ISERIES
168 if (systemcfg->platform == PLATFORM_ISERIES_LPAR) 169 if (_machine == PLATFORM_ISERIES_LPAR)
169 ret = iSeries_hpte_insert(hpteg, va, 170 ret = iSeries_hpte_insert(hpteg, va,
170 virt_to_abs(paddr), 171 virt_to_abs(paddr),
171 tmp_mode, 172 tmp_mode,
@@ -174,7 +175,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
174 else 175 else
175#endif 176#endif
176#ifdef CONFIG_PPC_PSERIES 177#ifdef CONFIG_PPC_PSERIES
177 if (systemcfg->platform & PLATFORM_LPAR) 178 if (_machine & PLATFORM_LPAR)
178 ret = pSeries_lpar_hpte_insert(hpteg, va, 179 ret = pSeries_lpar_hpte_insert(hpteg, va,
179 virt_to_abs(paddr), 180 virt_to_abs(paddr),
180 tmp_mode, 181 tmp_mode,
@@ -293,7 +294,7 @@ static void __init htab_init_page_sizes(void)
293 * Not in the device-tree, let's fallback on known size 294 * Not in the device-tree, let's fallback on known size
294 * list for 16M capable GP & GR 295 * list for 16M capable GP & GR
295 */ 296 */
296 if ((systemcfg->platform != PLATFORM_ISERIES_LPAR) && 297 if ((_machine != PLATFORM_ISERIES_LPAR) &&
297 cpu_has_feature(CPU_FTR_16M_PAGE)) 298 cpu_has_feature(CPU_FTR_16M_PAGE))
298 memcpy(mmu_psize_defs, mmu_psize_defaults_gp, 299 memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
299 sizeof(mmu_psize_defaults_gp)); 300 sizeof(mmu_psize_defaults_gp));
@@ -364,7 +365,7 @@ static int __init htab_dt_scan_pftsize(unsigned long node,
364 365
365static unsigned long __init htab_get_table_size(void) 366static unsigned long __init htab_get_table_size(void)
366{ 367{
367 unsigned long rnd_mem_size, pteg_count; 368 unsigned long mem_size, rnd_mem_size, pteg_count;
368 369
369 /* If hash size isn't already provided by the platform, we try to 370 /* If hash size isn't already provided by the platform, we try to
370 * retreive it from the device-tree. If it's not there neither, we 371 * retreive it from the device-tree. If it's not there neither, we
@@ -376,8 +377,9 @@ static unsigned long __init htab_get_table_size(void)
376 return 1UL << ppc64_pft_size; 377 return 1UL << ppc64_pft_size;
377 378
378 /* round mem_size up to next power of 2 */ 379 /* round mem_size up to next power of 2 */
379 rnd_mem_size = 1UL << __ilog2(systemcfg->physicalMemorySize); 380 mem_size = lmb_phys_mem_size();
380 if (rnd_mem_size < systemcfg->physicalMemorySize) 381 rnd_mem_size = 1UL << __ilog2(mem_size);
382 if (rnd_mem_size < mem_size)
381 rnd_mem_size <<= 1; 383 rnd_mem_size <<= 1;
382 384
383 /* # pages / 2 */ 385 /* # pages / 2 */
@@ -386,6 +388,15 @@ static unsigned long __init htab_get_table_size(void)
386 return pteg_count << 7; 388 return pteg_count << 7;
387} 389}
388 390
391#ifdef CONFIG_MEMORY_HOTPLUG
392void create_section_mapping(unsigned long start, unsigned long end)
393{
394 BUG_ON(htab_bolt_mapping(start, end, start,
395 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
396 mmu_linear_psize));
397}
398#endif /* CONFIG_MEMORY_HOTPLUG */
399
389void __init htab_initialize(void) 400void __init htab_initialize(void)
390{ 401{
391 unsigned long table, htab_size_bytes; 402 unsigned long table, htab_size_bytes;
@@ -410,7 +421,7 @@ void __init htab_initialize(void)
410 421
411 htab_hash_mask = pteg_count - 1; 422 htab_hash_mask = pteg_count - 1;
412 423
413 if (systemcfg->platform & PLATFORM_LPAR) { 424 if (platform_is_lpar()) {
414 /* Using a hypervisor which owns the htab */ 425 /* Using a hypervisor which owns the htab */
415 htab_address = NULL; 426 htab_address = NULL;
416 _SDR1 = 0; 427 _SDR1 = 0;
@@ -431,6 +442,9 @@ void __init htab_initialize(void)
431 442
432 /* Initialize the HPT with no entries */ 443 /* Initialize the HPT with no entries */
433 memset((void *)table, 0, htab_size_bytes); 444 memset((void *)table, 0, htab_size_bytes);
445
446 /* Set SDR1 */
447 mtspr(SPRN_SDR1, _SDR1);
434 } 448 }
435 449
436 mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX; 450 mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
@@ -500,6 +514,12 @@ void __init htab_initialize(void)
500#undef KB 514#undef KB
501#undef MB 515#undef MB
502 516
517void __init htab_initialize_secondary(void)
518{
519 if (!platform_is_lpar())
520 mtspr(SPRN_SDR1, _SDR1);
521}
522
503/* 523/*
504 * Called by asm hashtable.S for doing lazy icache flush 524 * Called by asm hashtable.S for doing lazy icache flush
505 */ 525 */
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 4612a79dfb6e..7d4b8b5f0606 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -84,9 +84,6 @@ void MMU_init(void);
84/* XXX should be in current.h -- paulus */ 84/* XXX should be in current.h -- paulus */
85extern struct task_struct *current_set[NR_CPUS]; 85extern struct task_struct *current_set[NR_CPUS];
86 86
87char *klimit = _end;
88struct device_node *memory_node;
89
90extern int init_bootmem_done; 87extern int init_bootmem_done;
91 88
92/* 89/*
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index ce974c83d88a..1134f70f231d 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -20,6 +20,8 @@
20 * 20 *
21 */ 21 */
22 22
23#undef DEBUG
24
23#include <linux/config.h> 25#include <linux/config.h>
24#include <linux/signal.h> 26#include <linux/signal.h>
25#include <linux/sched.h> 27#include <linux/sched.h>
@@ -64,6 +66,12 @@
64#include <asm/vdso.h> 66#include <asm/vdso.h>
65#include <asm/imalloc.h> 67#include <asm/imalloc.h>
66 68
69#ifdef DEBUG
70#define DBG(fmt...) printk(fmt)
71#else
72#define DBG(fmt...)
73#endif
74
67#if PGTABLE_RANGE > USER_VSID_RANGE 75#if PGTABLE_RANGE > USER_VSID_RANGE
68#warning Limited user VSID range means pagetable space is wasted 76#warning Limited user VSID range means pagetable space is wasted
69#endif 77#endif
@@ -72,8 +80,6 @@
72#warning TASK_SIZE is smaller than it needs to be. 80#warning TASK_SIZE is smaller than it needs to be.
73#endif 81#endif
74 82
75unsigned long klimit = (unsigned long)_end;
76
77/* max amount of RAM to use */ 83/* max amount of RAM to use */
78unsigned long __max_memory; 84unsigned long __max_memory;
79 85
@@ -188,14 +194,14 @@ static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
188} 194}
189 195
190#ifdef CONFIG_PPC_64K_PAGES 196#ifdef CONFIG_PPC_64K_PAGES
191static const int pgtable_cache_size[2] = { 197static const unsigned int pgtable_cache_size[3] = {
192 PTE_TABLE_SIZE, PGD_TABLE_SIZE 198 PTE_TABLE_SIZE, PMD_TABLE_SIZE, PGD_TABLE_SIZE
193}; 199};
194static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = { 200static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
195 "pte_pmd_cache", "pgd_cache", 201 "pte_pmd_cache", "pmd_cache", "pgd_cache",
196}; 202};
197#else 203#else
198static const int pgtable_cache_size[2] = { 204static const unsigned int pgtable_cache_size[2] = {
199 PTE_TABLE_SIZE, PMD_TABLE_SIZE 205 PTE_TABLE_SIZE, PMD_TABLE_SIZE
200}; 206};
201static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = { 207static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
@@ -213,6 +219,8 @@ void pgtable_cache_init(void)
213 int size = pgtable_cache_size[i]; 219 int size = pgtable_cache_size[i];
214 const char *name = pgtable_cache_name[i]; 220 const char *name = pgtable_cache_name[i];
215 221
222 DBG("Allocating page table cache %s (#%d) "
223 "for size: %08x...\n", name, i, size);
216 pgtable_cache[i] = kmem_cache_create(name, 224 pgtable_cache[i] = kmem_cache_create(name,
217 size, size, 225 size, size,
218 SLAB_HWCACHE_ALIGN | 226 SLAB_HWCACHE_ALIGN |
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 6f55efd9be95..1dd3cc69a490 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -110,6 +110,7 @@ EXPORT_SYMBOL(phys_mem_access_prot);
110void online_page(struct page *page) 110void online_page(struct page *page)
111{ 111{
112 ClearPageReserved(page); 112 ClearPageReserved(page);
113 set_page_count(page, 0);
113 free_cold_page(page); 114 free_cold_page(page);
114 totalram_pages++; 115 totalram_pages++;
115 num_physpages++; 116 num_physpages++;
@@ -127,6 +128,9 @@ int __devinit add_memory(u64 start, u64 size)
127 unsigned long start_pfn = start >> PAGE_SHIFT; 128 unsigned long start_pfn = start >> PAGE_SHIFT;
128 unsigned long nr_pages = size >> PAGE_SHIFT; 129 unsigned long nr_pages = size >> PAGE_SHIFT;
129 130
131 start += KERNELBASE;
132 create_section_mapping(start, start + size);
133
130 /* this should work for most non-highmem platforms */ 134 /* this should work for most non-highmem platforms */
131 zone = pgdata->node_zones; 135 zone = pgdata->node_zones;
132 136
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 900842451bd3..c7f7bb6f30b3 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -122,8 +122,11 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
122 * 122 *
123 */ 123 */
124 if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags, 124 if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
125 mmu_virtual_psize)) 125 mmu_virtual_psize)) {
126 panic("Can't map bolted IO mapping"); 126 printk(KERN_ERR "Failed to do bolted mapping IO "
127 "memory at %016lx !\n", pa);
128 return -ENOMEM;
129 }
127 } 130 }
128 return 0; 131 return 0;
129} 132}
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index fa325dbf98fc..cfbb4e1f966b 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -20,6 +20,7 @@
20#include <asm/cputable.h> 20#include <asm/cputable.h>
21#include <asm/lmb.h> 21#include <asm/lmb.h>
22#include <asm/abs_addr.h> 22#include <asm/abs_addr.h>
23#include <asm/firmware.h>
23 24
24struct stab_entry { 25struct stab_entry {
25 unsigned long esid_data; 26 unsigned long esid_data;
@@ -256,7 +257,7 @@ void stabs_alloc(void)
256 257
257 paca[cpu].stab_addr = newstab; 258 paca[cpu].stab_addr = newstab;
258 paca[cpu].stab_real = virt_to_abs(newstab); 259 paca[cpu].stab_real = virt_to_abs(newstab);
259 printk(KERN_DEBUG "Segment table for CPU %d at 0x%lx " 260 printk(KERN_INFO "Segment table for CPU %d at 0x%lx "
260 "virtual, 0x%lx absolute\n", 261 "virtual, 0x%lx absolute\n",
261 cpu, paca[cpu].stab_addr, paca[cpu].stab_real); 262 cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
262 } 263 }
@@ -270,10 +271,28 @@ void stabs_alloc(void)
270void stab_initialize(unsigned long stab) 271void stab_initialize(unsigned long stab)
271{ 272{
272 unsigned long vsid = get_kernel_vsid(KERNELBASE); 273 unsigned long vsid = get_kernel_vsid(KERNELBASE);
274 unsigned long stabreal;
273 275
274 asm volatile("isync; slbia; isync":::"memory"); 276 asm volatile("isync; slbia; isync":::"memory");
275 make_ste(stab, GET_ESID(KERNELBASE), vsid); 277 make_ste(stab, GET_ESID(KERNELBASE), vsid);
276 278
277 /* Order update */ 279 /* Order update */
278 asm volatile("sync":::"memory"); 280 asm volatile("sync":::"memory");
281
282 /* Set ASR */
283 stabreal = get_paca()->stab_real | 0x1ul;
284
285#ifdef CONFIG_PPC_ISERIES
286 if (firmware_has_feature(FW_FEATURE_ISERIES)) {
287 HvCall1(HvCallBaseSetASR, stabreal);
288 return;
289 }
290#endif /* CONFIG_PPC_ISERIES */
291#ifdef CONFIG_PPC_PSERIES
292 if (platform_is_lpar()) {
293 plpar_hcall_norets(H_SET_ASR, stabreal);
294 return;
295 }
296#endif
297 mtspr(SPRN_ASR, stabreal);
279} 298}
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index c4ee5478427b..e3a024e324b6 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -233,8 +233,7 @@ static unsigned long get_pc(struct pt_regs *regs)
233 mmcra = mfspr(SPRN_MMCRA); 233 mmcra = mfspr(SPRN_MMCRA);
234 234
235 /* Were we in the hypervisor? */ 235 /* Were we in the hypervisor? */
236 if ((systemcfg->platform == PLATFORM_PSERIES_LPAR) && 236 if (platform_is_lpar() && (mmcra & MMCRA_SIHV))
237 (mmcra & MMCRA_SIHV))
238 /* function descriptor madness */ 237 /* function descriptor madness */
239 return *((unsigned long *)hypervisor_bucket); 238 return *((unsigned long *)hypervisor_bucket);
240 239
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index ecd32d5d85f4..4099ddab9205 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -361,7 +361,9 @@ static void __init chrp_find_openpic(void)
361 printk(KERN_INFO "OpenPIC at %lx\n", opaddr); 361 printk(KERN_INFO "OpenPIC at %lx\n", opaddr);
362 362
363 irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */ 363 irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */
364 prom_get_irq_senses(init_senses, NUM_8259_INTERRUPTS, NR_IRQS - 4); 364 prom_get_irq_senses(init_senses, NUM_ISA_INTERRUPTS, NR_IRQS - 4);
365 /* i8259 cascade is always positive level */
366 init_senses[0] = IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE;
365 367
366 iranges = (unsigned int *) get_property(np, "interrupt-ranges", &len); 368 iranges = (unsigned int *) get_property(np, "interrupt-ranges", &len);
367 if (iranges == NULL) 369 if (iranges == NULL)
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index a06603d84a45..01090e9ce0cf 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -103,6 +103,9 @@ static void intReceived(struct XmPciLpEvent *eventParm,
103 struct pt_regs *regsParm) 103 struct pt_regs *regsParm)
104{ 104{
105 int irq; 105 int irq;
106#ifdef CONFIG_IRQSTACKS
107 struct thread_info *curtp, *irqtp;
108#endif
106 109
107 ++Pci_Interrupt_Count; 110 ++Pci_Interrupt_Count;
108 111
@@ -110,7 +113,20 @@ static void intReceived(struct XmPciLpEvent *eventParm,
110 case XmPciLpEvent_SlotInterrupt: 113 case XmPciLpEvent_SlotInterrupt:
111 irq = eventParm->hvLpEvent.xCorrelationToken; 114 irq = eventParm->hvLpEvent.xCorrelationToken;
112 /* Dispatch the interrupt handlers for this irq */ 115 /* Dispatch the interrupt handlers for this irq */
113 ppc_irq_dispatch_handler(regsParm, irq); 116#ifdef CONFIG_IRQSTACKS
117 /* Switch to the irq stack to handle this */
118 curtp = current_thread_info();
119 irqtp = hardirq_ctx[smp_processor_id()];
120 if (curtp != irqtp) {
121 irqtp->task = curtp->task;
122 irqtp->flags = 0;
123 call___do_IRQ(irq, regsParm, irqtp);
124 irqtp->task = NULL;
125 if (irqtp->flags)
126 set_bits(irqtp->flags, &curtp->flags);
127 } else
128#endif
129 __do_IRQ(irq, regsParm);
114 HvCallPci_eoi(eventParm->eventData.slotInterrupt.busNumber, 130 HvCallPci_eoi(eventParm->eventData.slotInterrupt.busNumber,
115 eventParm->eventData.slotInterrupt.subBusNumber, 131 eventParm->eventData.slotInterrupt.subBusNumber,
116 eventParm->eventData.slotInterrupt.deviceId); 132 eventParm->eventData.slotInterrupt.deviceId);
@@ -310,10 +326,8 @@ static void iSeries_disable_IRQ(unsigned int irq)
310} 326}
311 327
312/* 328/*
313 * Need to define this so ppc_irq_dispatch_handler will NOT call 329 * This does nothing because there is not enough information
314 * enable_IRQ at the end of interrupt handling. However, this does 330 * provided to do the EOI HvCall. This is done by XmPciLpEvent.c
315 * nothing because there is not enough information provided to do
316 * the EOI HvCall. This is done by XmPciLpEvent.c
317 */ 331 */
318static void iSeries_end_IRQ(unsigned int irq) 332static void iSeries_end_IRQ(unsigned int irq)
319{ 333{
diff --git a/arch/powerpc/platforms/iseries/misc.S b/arch/powerpc/platforms/iseries/misc.S
index 09f14522e176..dfe7aa1ba098 100644
--- a/arch/powerpc/platforms/iseries/misc.S
+++ b/arch/powerpc/platforms/iseries/misc.S
@@ -15,6 +15,7 @@
15 15
16#include <asm/processor.h> 16#include <asm/processor.h>
17#include <asm/asm-offsets.h> 17#include <asm/asm-offsets.h>
18#include <asm/ppc_asm.h>
18 19
19 .text 20 .text
20 21
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 7f8f0cda6a74..6a29f301436b 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -39,7 +39,8 @@
39#include <asm/sections.h> 39#include <asm/sections.h>
40#include <asm/iommu.h> 40#include <asm/iommu.h>
41#include <asm/firmware.h> 41#include <asm/firmware.h>
42 42#include <asm/systemcfg.h>
43#include <asm/system.h>
43#include <asm/time.h> 44#include <asm/time.h>
44#include <asm/paca.h> 45#include <asm/paca.h>
45#include <asm/cache.h> 46#include <asm/cache.h>
@@ -71,7 +72,7 @@ extern void hvlog(char *fmt, ...);
71#endif 72#endif
72 73
73/* Function Prototypes */ 74/* Function Prototypes */
74static void build_iSeries_Memory_Map(void); 75static unsigned long build_iSeries_Memory_Map(void);
75static void iseries_shared_idle(void); 76static void iseries_shared_idle(void);
76static void iseries_dedicated_idle(void); 77static void iseries_dedicated_idle(void);
77#ifdef CONFIG_PCI 78#ifdef CONFIG_PCI
@@ -84,7 +85,6 @@ static void iSeries_pci_final_fixup(void) { }
84int piranha_simulator; 85int piranha_simulator;
85 86
86extern int rd_size; /* Defined in drivers/block/rd.c */ 87extern int rd_size; /* Defined in drivers/block/rd.c */
87extern unsigned long klimit;
88extern unsigned long embedded_sysmap_start; 88extern unsigned long embedded_sysmap_start;
89extern unsigned long embedded_sysmap_end; 89extern unsigned long embedded_sysmap_end;
90 90
@@ -403,9 +403,11 @@ void mschunks_alloc(unsigned long num_chunks)
403 * a table used to translate Linux's physical addresses to these 403 * a table used to translate Linux's physical addresses to these
404 * absolute addresses. Absolute addresses are needed when 404 * absolute addresses. Absolute addresses are needed when
405 * communicating with the hypervisor (e.g. to build HPT entries) 405 * communicating with the hypervisor (e.g. to build HPT entries)
406 *
407 * Returns the physical memory size
406 */ 408 */
407 409
408static void __init build_iSeries_Memory_Map(void) 410static unsigned long __init build_iSeries_Memory_Map(void)
409{ 411{
410 u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize; 412 u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
411 u32 nextPhysChunk; 413 u32 nextPhysChunk;
@@ -538,7 +540,7 @@ static void __init build_iSeries_Memory_Map(void)
538 * which should be equal to 540 * which should be equal to
539 * nextPhysChunk 541 * nextPhysChunk
540 */ 542 */
541 systemcfg->physicalMemorySize = chunk_to_addr(nextPhysChunk); 543 return chunk_to_addr(nextPhysChunk);
542} 544}
543 545
544/* 546/*
@@ -564,8 +566,8 @@ static void __init iSeries_setup_arch(void)
564 printk("Max physical processors = %d\n", 566 printk("Max physical processors = %d\n",
565 itVpdAreas.xSlicMaxPhysicalProcs); 567 itVpdAreas.xSlicMaxPhysicalProcs);
566 568
567 systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR; 569 _systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
568 printk("Processor version = %x\n", systemcfg->processor); 570 printk("Processor version = %x\n", _systemcfg->processor);
569} 571}
570 572
571static void iSeries_show_cpuinfo(struct seq_file *m) 573static void iSeries_show_cpuinfo(struct seq_file *m)
@@ -702,7 +704,6 @@ static void iseries_shared_idle(void)
702 704
703static void iseries_dedicated_idle(void) 705static void iseries_dedicated_idle(void)
704{ 706{
705 long oldval;
706 set_thread_flag(TIF_POLLING_NRFLAG); 707 set_thread_flag(TIF_POLLING_NRFLAG);
707 708
708 while (1) { 709 while (1) {
@@ -929,7 +930,7 @@ void dt_cpus(struct iseries_flat_dt *dt)
929 dt_end_node(dt); 930 dt_end_node(dt);
930} 931}
931 932
932void build_flat_dt(struct iseries_flat_dt *dt) 933void build_flat_dt(struct iseries_flat_dt *dt, unsigned long phys_mem_size)
933{ 934{
934 u64 tmp[2]; 935 u64 tmp[2];
935 936
@@ -945,7 +946,7 @@ void build_flat_dt(struct iseries_flat_dt *dt)
945 dt_prop_str(dt, "name", "memory"); 946 dt_prop_str(dt, "name", "memory");
946 dt_prop_str(dt, "device_type", "memory"); 947 dt_prop_str(dt, "device_type", "memory");
947 tmp[0] = 0; 948 tmp[0] = 0;
948 tmp[1] = systemcfg->physicalMemorySize; 949 tmp[1] = phys_mem_size;
949 dt_prop_u64_list(dt, "reg", tmp, 2); 950 dt_prop_u64_list(dt, "reg", tmp, 2);
950 dt_end_node(dt); 951 dt_end_node(dt);
951 952
@@ -965,13 +966,15 @@ void build_flat_dt(struct iseries_flat_dt *dt)
965 966
966void * __init iSeries_early_setup(void) 967void * __init iSeries_early_setup(void)
967{ 968{
969 unsigned long phys_mem_size;
970
968 iSeries_fixup_klimit(); 971 iSeries_fixup_klimit();
969 972
970 /* 973 /*
971 * Initialize the table which translate Linux physical addresses to 974 * Initialize the table which translate Linux physical addresses to
972 * AS/400 absolute addresses 975 * AS/400 absolute addresses
973 */ 976 */
974 build_iSeries_Memory_Map(); 977 phys_mem_size = build_iSeries_Memory_Map();
975 978
976 iSeries_get_cmdline(); 979 iSeries_get_cmdline();
977 980
@@ -981,7 +984,7 @@ void * __init iSeries_early_setup(void)
981 /* Parse early parameters, in particular mem=x */ 984 /* Parse early parameters, in particular mem=x */
982 parse_early_param(); 985 parse_early_param();
983 986
984 build_flat_dt(&iseries_dt); 987 build_flat_dt(&iseries_dt, phys_mem_size);
985 988
986 return (void *) __pa(&iseries_dt); 989 return (void *) __pa(&iseries_dt);
987} 990}
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 340c21caeae2..895aeb3f75d0 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -380,9 +380,6 @@ void __init maple_pcibios_fixup(void)
380 for_each_pci_dev(dev) 380 for_each_pci_dev(dev)
381 pci_read_irq_line(dev); 381 pci_read_irq_line(dev);
382 382
383 /* Do the mapping of the IO space */
384 phbs_remap_io();
385
386 DBG(" <- maple_pcibios_fixup\n"); 383 DBG(" <- maple_pcibios_fixup\n");
387} 384}
388 385
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 8f818d092e2b..dfd41b9781a9 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -918,9 +918,6 @@ void __init pmac_pci_init(void)
918 PCI_DN(np)->busno = 0xf0; 918 PCI_DN(np)->busno = 0xf0;
919 } 919 }
920 920
921 /* map in PCI I/O space */
922 phbs_remap_io();
923
924 /* pmac_check_ht_link(); */ 921 /* pmac_check_ht_link(); */
925 922
926 /* Tell pci.c to not use the common resource allocation mechanism */ 923 /* Tell pci.c to not use the common resource allocation mechanism */
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 83a49e80ac29..90040c49494d 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -74,6 +74,9 @@ static DEFINE_SPINLOCK(pmac_pic_lock);
74#define GATWICK_IRQ_POOL_SIZE 10 74#define GATWICK_IRQ_POOL_SIZE 10
75static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE]; 75static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
76 76
77#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
78static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
79
77/* 80/*
78 * Mark an irq as "lost". This is only used on the pmac 81 * Mark an irq as "lost". This is only used on the pmac
79 * since it can lose interrupts (see pmac_set_irq_mask). 82 * since it can lose interrupts (see pmac_set_irq_mask).
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index e1f9443cc872..957b09103422 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -305,9 +305,19 @@ static int __init smp_psurge_probe(void)
305 psurge_start = ioremap(PSURGE_START, 4); 305 psurge_start = ioremap(PSURGE_START, 4);
306 psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4); 306 psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
307 307
308 /* this is not actually strictly necessary -- paulus. */ 308 /*
309 for (i = 1; i < ncpus; ++i) 309 * This is necessary because OF doesn't know about the
310 smp_hw_index[i] = i; 310 * secondary cpu(s), and thus there aren't nodes in the
311 * device tree for them, and smp_setup_cpu_maps hasn't
312 * set their bits in cpu_possible_map and cpu_present_map.
313 */
314 if (ncpus > NR_CPUS)
315 ncpus = NR_CPUS;
316 for (i = 1; i < ncpus ; ++i) {
317 cpu_set(i, cpu_present_map);
318 cpu_set(i, cpu_possible_map);
319 set_hard_smp_processor_id(i, i);
320 }
311 321
312 if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352); 322 if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
313 323
@@ -348,6 +358,7 @@ static void __init psurge_dual_sync_tb(int cpu_nr)
348 int t; 358 int t;
349 359
350 set_dec(tb_ticks_per_jiffy); 360 set_dec(tb_ticks_per_jiffy);
361 /* XXX fixme */
351 set_tb(0, 0); 362 set_tb(0, 0);
352 last_jiffy_stamp(cpu_nr) = 0; 363 last_jiffy_stamp(cpu_nr) = 0;
353 364
@@ -363,8 +374,6 @@ static void __init psurge_dual_sync_tb(int cpu_nr)
363 374
364 /* now interrupt the secondary, starting both TBs */ 375 /* now interrupt the secondary, starting both TBs */
365 psurge_set_ipi(1); 376 psurge_set_ipi(1);
366
367 smp_tb_synchronized = 1;
368} 377}
369 378
370static struct irqaction psurge_irqaction = { 379static struct irqaction psurge_irqaction = {
@@ -625,9 +634,8 @@ void smp_core99_give_timebase(void)
625 for (t = 100000; t > 0 && sec_tb_reset; --t) 634 for (t = 100000; t > 0 && sec_tb_reset; --t)
626 udelay(10); 635 udelay(10);
627 if (sec_tb_reset) 636 if (sec_tb_reset)
637 /* XXX BUG_ON here? */
628 printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n"); 638 printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
629 else
630 smp_tb_synchronized = 1;
631 639
632 /* Now, restart the timebase by leaving the GPIO to an open collector */ 640 /* Now, restart the timebase by leaving the GPIO to an open collector */
633 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0); 641 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
@@ -810,19 +818,9 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
810} 818}
811 819
812 820
813/* Core99 Macs (dual G4s and G5s) */
814struct smp_ops_t core99_smp_ops = {
815 .message_pass = smp_mpic_message_pass,
816 .probe = smp_core99_probe,
817 .kick_cpu = smp_core99_kick_cpu,
818 .setup_cpu = smp_core99_setup_cpu,
819 .give_timebase = smp_core99_give_timebase,
820 .take_timebase = smp_core99_take_timebase,
821};
822
823#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32) 821#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
824 822
825int __cpu_disable(void) 823int smp_core99_cpu_disable(void)
826{ 824{
827 cpu_clear(smp_processor_id(), cpu_online_map); 825 cpu_clear(smp_processor_id(), cpu_online_map);
828 826
@@ -846,7 +844,7 @@ void cpu_die(void)
846 low_cpu_die(); 844 low_cpu_die();
847} 845}
848 846
849void __cpu_die(unsigned int cpu) 847void smp_core99_cpu_die(unsigned int cpu)
850{ 848{
851 int timeout; 849 int timeout;
852 850
@@ -858,8 +856,21 @@ void __cpu_die(unsigned int cpu)
858 } 856 }
859 msleep(1); 857 msleep(1);
860 } 858 }
861 cpu_callin_map[cpu] = 0;
862 cpu_dead[cpu] = 0; 859 cpu_dead[cpu] = 0;
863} 860}
864 861
865#endif 862#endif
863
864/* Core99 Macs (dual G4s and G5s) */
865struct smp_ops_t core99_smp_ops = {
866 .message_pass = smp_mpic_message_pass,
867 .probe = smp_core99_probe,
868 .kick_cpu = smp_core99_kick_cpu,
869 .setup_cpu = smp_core99_setup_cpu,
870 .give_timebase = smp_core99_give_timebase,
871 .take_timebase = smp_core99_take_timebase,
872#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
873 .cpu_disable = smp_core99_cpu_disable,
874 .cpu_die = smp_core99_cpu_die,
875#endif
876};
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index b9938fece781..e7ca5b1f591e 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -3,3 +3,5 @@ obj-y := pci.o lpar.o hvCall.o nvram.o reconfig.o \
3obj-$(CONFIG_SMP) += smp.o 3obj-$(CONFIG_SMP) += smp.o
4obj-$(CONFIG_IBMVIO) += vio.o 4obj-$(CONFIG_IBMVIO) += vio.o
5obj-$(CONFIG_XICS) += xics.o 5obj-$(CONFIG_XICS) += xics.o
6obj-$(CONFIG_SCANLOG) += scanlog.o
7obj-$(CONFIG_EEH) += eeh.o eeh_event.o
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
new file mode 100644
index 000000000000..79de2310e70b
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -0,0 +1,1212 @@
1/*
2 * eeh.c
3 * Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/delay.h>
21#include <linux/init.h>
22#include <linux/list.h>
23#include <linux/pci.h>
24#include <linux/proc_fs.h>
25#include <linux/rbtree.h>
26#include <linux/seq_file.h>
27#include <linux/spinlock.h>
28#include <asm/atomic.h>
29#include <asm/eeh.h>
30#include <asm/eeh_event.h>
31#include <asm/io.h>
32#include <asm/machdep.h>
33#include <asm/ppc-pci.h>
34#include <asm/rtas.h>
35
36#undef DEBUG
37
38/** Overview:
39 * EEH, or "Extended Error Handling" is a PCI bridge technology for
40 * dealing with PCI bus errors that can't be dealt with within the
41 * usual PCI framework, except by check-stopping the CPU. Systems
42 * that are designed for high-availability/reliability cannot afford
43 * to crash due to a "mere" PCI error, thus the need for EEH.
44 * An EEH-capable bridge operates by converting a detected error
45 * into a "slot freeze", taking the PCI adapter off-line, making
46 * the slot behave, from the OS'es point of view, as if the slot
47 * were "empty": all reads return 0xff's and all writes are silently
48 * ignored. EEH slot isolation events can be triggered by parity
49 * errors on the address or data busses (e.g. during posted writes),
50 * which in turn might be caused by low voltage on the bus, dust,
51 * vibration, humidity, radioactivity or plain-old failed hardware.
52 *
53 * Note, however, that one of the leading causes of EEH slot
54 * freeze events are buggy device drivers, buggy device microcode,
55 * or buggy device hardware. This is because any attempt by the
56 * device to bus-master data to a memory address that is not
57 * assigned to the device will trigger a slot freeze. (The idea
58 * is to prevent devices-gone-wild from corrupting system memory).
59 * Buggy hardware/drivers will have a miserable time co-existing
60 * with EEH.
61 *
62 * Ideally, a PCI device driver, when suspecting that an isolation
63 * event has occured (e.g. by reading 0xff's), will then ask EEH
64 * whether this is the case, and then take appropriate steps to
65 * reset the PCI slot, the PCI device, and then resume operations.
66 * However, until that day, the checking is done here, with the
67 * eeh_check_failure() routine embedded in the MMIO macros. If
68 * the slot is found to be isolated, an "EEH Event" is synthesized
69 * and sent out for processing.
70 */
71
72/* If a device driver keeps reading an MMIO register in an interrupt
73 * handler after a slot isolation event has occurred, we assume it
74 * is broken and panic. This sets the threshold for how many read
75 * attempts we allow before panicking.
76 */
77#define EEH_MAX_FAILS 100000
78
79/* Misc forward declaraions */
80static void eeh_save_bars(struct pci_dev * pdev, struct pci_dn *pdn);
81
82/* RTAS tokens */
83static int ibm_set_eeh_option;
84static int ibm_set_slot_reset;
85static int ibm_read_slot_reset_state;
86static int ibm_read_slot_reset_state2;
87static int ibm_slot_error_detail;
88
89static int eeh_subsystem_enabled;
90
91/* Lock to avoid races due to multiple reports of an error */
92static DEFINE_SPINLOCK(confirm_error_lock);
93
94/* Buffer for reporting slot-error-detail rtas calls */
95static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
96static DEFINE_SPINLOCK(slot_errbuf_lock);
97static int eeh_error_buf_size;
98
99/* System monitoring statistics */
100static DEFINE_PER_CPU(unsigned long, no_device);
101static DEFINE_PER_CPU(unsigned long, no_dn);
102static DEFINE_PER_CPU(unsigned long, no_cfg_addr);
103static DEFINE_PER_CPU(unsigned long, ignored_check);
104static DEFINE_PER_CPU(unsigned long, total_mmio_ffs);
105static DEFINE_PER_CPU(unsigned long, false_positives);
106static DEFINE_PER_CPU(unsigned long, ignored_failures);
107static DEFINE_PER_CPU(unsigned long, slot_resets);
108
109/**
110 * The pci address cache subsystem. This subsystem places
111 * PCI device address resources into a red-black tree, sorted
112 * according to the address range, so that given only an i/o
113 * address, the corresponding PCI device can be **quickly**
114 * found. It is safe to perform an address lookup in an interrupt
115 * context; this ability is an important feature.
116 *
117 * Currently, the only customer of this code is the EEH subsystem;
118 * thus, this code has been somewhat tailored to suit EEH better.
119 * In particular, the cache does *not* hold the addresses of devices
120 * for which EEH is not enabled.
121 *
122 * (Implementation Note: The RB tree seems to be better/faster
123 * than any hash algo I could think of for this problem, even
124 * with the penalty of slow pointer chases for d-cache misses).
125 */
126struct pci_io_addr_range
127{
128 struct rb_node rb_node;
129 unsigned long addr_lo;
130 unsigned long addr_hi;
131 struct pci_dev *pcidev;
132 unsigned int flags;
133};
134
135static struct pci_io_addr_cache
136{
137 struct rb_root rb_root;
138 spinlock_t piar_lock;
139} pci_io_addr_cache_root;
140
141static inline struct pci_dev *__pci_get_device_by_addr(unsigned long addr)
142{
143 struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node;
144
145 while (n) {
146 struct pci_io_addr_range *piar;
147 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
148
149 if (addr < piar->addr_lo) {
150 n = n->rb_left;
151 } else {
152 if (addr > piar->addr_hi) {
153 n = n->rb_right;
154 } else {
155 pci_dev_get(piar->pcidev);
156 return piar->pcidev;
157 }
158 }
159 }
160
161 return NULL;
162}
163
164/**
165 * pci_get_device_by_addr - Get device, given only address
166 * @addr: mmio (PIO) phys address or i/o port number
167 *
168 * Given an mmio phys address, or a port number, find a pci device
169 * that implements this address. Be sure to pci_dev_put the device
170 * when finished. I/O port numbers are assumed to be offset
171 * from zero (that is, they do *not* have pci_io_addr added in).
172 * It is safe to call this function within an interrupt.
173 */
174static struct pci_dev *pci_get_device_by_addr(unsigned long addr)
175{
176 struct pci_dev *dev;
177 unsigned long flags;
178
179 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
180 dev = __pci_get_device_by_addr(addr);
181 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
182 return dev;
183}
184
185#ifdef DEBUG
186/*
187 * Handy-dandy debug print routine, does nothing more
188 * than print out the contents of our addr cache.
189 */
190static void pci_addr_cache_print(struct pci_io_addr_cache *cache)
191{
192 struct rb_node *n;
193 int cnt = 0;
194
195 n = rb_first(&cache->rb_root);
196 while (n) {
197 struct pci_io_addr_range *piar;
198 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
199 printk(KERN_DEBUG "PCI: %s addr range %d [%lx-%lx]: %s\n",
200 (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
201 piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev));
202 cnt++;
203 n = rb_next(n);
204 }
205}
206#endif
207
208/* Insert address range into the rb tree. */
209static struct pci_io_addr_range *
210pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
211 unsigned long ahi, unsigned int flags)
212{
213 struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node;
214 struct rb_node *parent = NULL;
215 struct pci_io_addr_range *piar;
216
217 /* Walk tree, find a place to insert into tree */
218 while (*p) {
219 parent = *p;
220 piar = rb_entry(parent, struct pci_io_addr_range, rb_node);
221 if (ahi < piar->addr_lo) {
222 p = &parent->rb_left;
223 } else if (alo > piar->addr_hi) {
224 p = &parent->rb_right;
225 } else {
226 if (dev != piar->pcidev ||
227 alo != piar->addr_lo || ahi != piar->addr_hi) {
228 printk(KERN_WARNING "PIAR: overlapping address range\n");
229 }
230 return piar;
231 }
232 }
233 piar = (struct pci_io_addr_range *)kmalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
234 if (!piar)
235 return NULL;
236
237 piar->addr_lo = alo;
238 piar->addr_hi = ahi;
239 piar->pcidev = dev;
240 piar->flags = flags;
241
242#ifdef DEBUG
243 printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n",
244 alo, ahi, pci_name (dev));
245#endif
246
247 rb_link_node(&piar->rb_node, parent, p);
248 rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root);
249
250 return piar;
251}
252
253static void __pci_addr_cache_insert_device(struct pci_dev *dev)
254{
255 struct device_node *dn;
256 struct pci_dn *pdn;
257 int i;
258 int inserted = 0;
259
260 dn = pci_device_to_OF_node(dev);
261 if (!dn) {
262 printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n", pci_name(dev));
263 return;
264 }
265
266 /* Skip any devices for which EEH is not enabled. */
267 pdn = PCI_DN(dn);
268 if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
269 pdn->eeh_mode & EEH_MODE_NOCHECK) {
270#ifdef DEBUG
271 printk(KERN_INFO "PCI: skip building address cache for=%s - %s\n",
272 pci_name(dev), pdn->node->full_name);
273#endif
274 return;
275 }
276
277 /* The cache holds a reference to the device... */
278 pci_dev_get(dev);
279
280 /* Walk resources on this device, poke them into the tree */
281 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
282 unsigned long start = pci_resource_start(dev,i);
283 unsigned long end = pci_resource_end(dev,i);
284 unsigned int flags = pci_resource_flags(dev,i);
285
286 /* We are interested only bus addresses, not dma or other stuff */
287 if (0 == (flags & (IORESOURCE_IO | IORESOURCE_MEM)))
288 continue;
289 if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
290 continue;
291 pci_addr_cache_insert(dev, start, end, flags);
292 inserted = 1;
293 }
294
295 /* If there was nothing to add, the cache has no reference... */
296 if (!inserted)
297 pci_dev_put(dev);
298}
299
300/**
301 * pci_addr_cache_insert_device - Add a device to the address cache
302 * @dev: PCI device whose I/O addresses we are interested in.
303 *
304 * In order to support the fast lookup of devices based on addresses,
305 * we maintain a cache of devices that can be quickly searched.
306 * This routine adds a device to that cache.
307 */
308static void pci_addr_cache_insert_device(struct pci_dev *dev)
309{
310 unsigned long flags;
311
312 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
313 __pci_addr_cache_insert_device(dev);
314 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
315}
316
317static inline void __pci_addr_cache_remove_device(struct pci_dev *dev)
318{
319 struct rb_node *n;
320 int removed = 0;
321
322restart:
323 n = rb_first(&pci_io_addr_cache_root.rb_root);
324 while (n) {
325 struct pci_io_addr_range *piar;
326 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
327
328 if (piar->pcidev == dev) {
329 rb_erase(n, &pci_io_addr_cache_root.rb_root);
330 removed = 1;
331 kfree(piar);
332 goto restart;
333 }
334 n = rb_next(n);
335 }
336
337 /* The cache no longer holds its reference to this device... */
338 if (removed)
339 pci_dev_put(dev);
340}
341
342/**
343 * pci_addr_cache_remove_device - remove pci device from addr cache
344 * @dev: device to remove
345 *
346 * Remove a device from the addr-cache tree.
347 * This is potentially expensive, since it will walk
348 * the tree multiple times (once per resource).
349 * But so what; device removal doesn't need to be that fast.
350 */
351static void pci_addr_cache_remove_device(struct pci_dev *dev)
352{
353 unsigned long flags;
354
355 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
356 __pci_addr_cache_remove_device(dev);
357 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
358}
359
360/**
361 * pci_addr_cache_build - Build a cache of I/O addresses
362 *
363 * Build a cache of pci i/o addresses. This cache will be used to
364 * find the pci device that corresponds to a given address.
365 * This routine scans all pci busses to build the cache.
366 * Must be run late in boot process, after the pci controllers
367 * have been scaned for devices (after all device resources are known).
368 */
369void __init pci_addr_cache_build(void)
370{
371 struct device_node *dn;
372 struct pci_dev *dev = NULL;
373
374 if (!eeh_subsystem_enabled)
375 return;
376
377 spin_lock_init(&pci_io_addr_cache_root.piar_lock);
378
379 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
380 /* Ignore PCI bridges ( XXX why ??) */
381 if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
382 continue;
383 }
384 pci_addr_cache_insert_device(dev);
385
386 /* Save the BAR's; firmware doesn't restore these after EEH reset */
387 dn = pci_device_to_OF_node(dev);
388 eeh_save_bars(dev, PCI_DN(dn));
389 }
390
391#ifdef DEBUG
392 /* Verify tree built up above, echo back the list of addrs. */
393 pci_addr_cache_print(&pci_io_addr_cache_root);
394#endif
395}
396
397/* --------------------------------------------------------------- */
398/* Above lies the PCI Address Cache. Below lies the EEH event infrastructure */
399
400void eeh_slot_error_detail (struct pci_dn *pdn, int severity)
401{
402 unsigned long flags;
403 int rc;
404
405 /* Log the error with the rtas logger */
406 spin_lock_irqsave(&slot_errbuf_lock, flags);
407 memset(slot_errbuf, 0, eeh_error_buf_size);
408
409 rc = rtas_call(ibm_slot_error_detail,
410 8, 1, NULL, pdn->eeh_config_addr,
411 BUID_HI(pdn->phb->buid),
412 BUID_LO(pdn->phb->buid), NULL, 0,
413 virt_to_phys(slot_errbuf),
414 eeh_error_buf_size,
415 severity);
416
417 if (rc == 0)
418 log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
419 spin_unlock_irqrestore(&slot_errbuf_lock, flags);
420}
421
422/**
423 * read_slot_reset_state - Read the reset state of a device node's slot
424 * @dn: device node to read
425 * @rets: array to return results in
426 */
427static int read_slot_reset_state(struct pci_dn *pdn, int rets[])
428{
429 int token, outputs;
430
431 if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
432 token = ibm_read_slot_reset_state2;
433 outputs = 4;
434 } else {
435 token = ibm_read_slot_reset_state;
436 rets[2] = 0; /* fake PE Unavailable info */
437 outputs = 3;
438 }
439
440 return rtas_call(token, 3, outputs, rets, pdn->eeh_config_addr,
441 BUID_HI(pdn->phb->buid), BUID_LO(pdn->phb->buid));
442}
443
444/**
445 * eeh_token_to_phys - convert EEH address token to phys address
446 * @token i/o token, should be address in the form 0xA....
447 */
448static inline unsigned long eeh_token_to_phys(unsigned long token)
449{
450 pte_t *ptep;
451 unsigned long pa;
452
453 ptep = find_linux_pte(init_mm.pgd, token);
454 if (!ptep)
455 return token;
456 pa = pte_pfn(*ptep) << PAGE_SHIFT;
457
458 return pa | (token & (PAGE_SIZE-1));
459}
460
461/**
462 * Return the "partitionable endpoint" (pe) under which this device lies
463 */
464static struct device_node * find_device_pe(struct device_node *dn)
465{
466 while ((dn->parent) && PCI_DN(dn->parent) &&
467 (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) {
468 dn = dn->parent;
469 }
470 return dn;
471}
472
473/** Mark all devices that are peers of this device as failed.
474 * Mark the device driver too, so that it can see the failure
475 * immediately; this is critical, since some drivers poll
476 * status registers in interrupts ... If a driver is polling,
477 * and the slot is frozen, then the driver can deadlock in
478 * an interrupt context, which is bad.
479 */
480
481static void __eeh_mark_slot (struct device_node *dn, int mode_flag)
482{
483 while (dn) {
484 if (PCI_DN(dn)) {
485 PCI_DN(dn)->eeh_mode |= mode_flag;
486
487 if (dn->child)
488 __eeh_mark_slot (dn->child, mode_flag);
489 }
490 dn = dn->sibling;
491 }
492}
493
494void eeh_mark_slot (struct device_node *dn, int mode_flag)
495{
496 dn = find_device_pe (dn);
497 PCI_DN(dn)->eeh_mode |= mode_flag;
498 __eeh_mark_slot (dn->child, mode_flag);
499}
500
501static void __eeh_clear_slot (struct device_node *dn, int mode_flag)
502{
503 while (dn) {
504 if (PCI_DN(dn)) {
505 PCI_DN(dn)->eeh_mode &= ~mode_flag;
506 PCI_DN(dn)->eeh_check_count = 0;
507 if (dn->child)
508 __eeh_clear_slot (dn->child, mode_flag);
509 }
510 dn = dn->sibling;
511 }
512}
513
514void eeh_clear_slot (struct device_node *dn, int mode_flag)
515{
516 unsigned long flags;
517 spin_lock_irqsave(&confirm_error_lock, flags);
518 dn = find_device_pe (dn);
519 PCI_DN(dn)->eeh_mode &= ~mode_flag;
520 PCI_DN(dn)->eeh_check_count = 0;
521 __eeh_clear_slot (dn->child, mode_flag);
522 spin_unlock_irqrestore(&confirm_error_lock, flags);
523}
524
/**
 * eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze
 * @dn: device node
 * @dev: pci device, if known
 *
 * Check for an EEH failure for the given device node.  Call this
 * routine if the result of a read was all 0xff's and you want to
 * find out if this is due to an EEH slot freeze.  This routine
 * will query firmware for the EEH status.
 *
 * Returns 0 if there has not been an EEH error; otherwise returns
 * a non-zero value and queues up a slot isolation event notification.
 *
 * It is safe to call this routine in an interrupt context.
 */
int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
{
	int ret;
	/* rets from read_slot_reset_state(): [0] reset state, [1] EEH
	 * capability flag, [2] PE-unavailable time (ms) -- TODO confirm
	 * against the PAPR RTAS specification. */
	int rets[3];
	unsigned long flags;
	struct pci_dn *pdn;
	int rc = 0;

	/* Per-cpu statistic: every all-ones read that lands here. */
	__get_cpu_var(total_mmio_ffs)++;

	if (!eeh_subsystem_enabled)
		return 0;

	if (!dn) {
		__get_cpu_var(no_dn)++;
		return 0;
	}
	pdn = PCI_DN(dn);

	/* Access to IO BARs might get this far and still not want checking. */
	if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
	    pdn->eeh_mode & EEH_MODE_NOCHECK) {
		__get_cpu_var(ignored_check)++;
#ifdef DEBUG
		printk ("EEH:ignored check (%x) for %s %s\n",
			pdn->eeh_mode, pci_name (dev), dn->full_name);
#endif
		return 0;
	}

	if (!pdn->eeh_config_addr) {
		__get_cpu_var(no_cfg_addr)++;
		return 0;
	}

	/* If we already have a pending isolation event for this
	 * slot, we know it's bad already, we don't need to check.
	 * Do this checking under a lock; as multiple PCI devices
	 * in one slot might report errors simultaneously, and we
	 * only want one error recovery routine running.
	 */
	spin_lock_irqsave(&confirm_error_lock, flags);
	rc = 1;
	if (pdn->eeh_mode & EEH_MODE_ISOLATED) {
		pdn->eeh_check_count ++;
		/* A driver re-reading a frozen slot EEH_MAX_FAILS times
		 * is stuck in a loop; halt rather than spin forever. */
		if (pdn->eeh_check_count >= EEH_MAX_FAILS) {
			printk (KERN_ERR "EEH: Device driver ignored %d bad reads, panicing\n",
			        pdn->eeh_check_count);
			dump_stack();

			/* re-read the slot reset state */
			if (read_slot_reset_state(pdn, rets) != 0)
				rets[0] = -1;	/* reset state unknown */

			/* If we are here, then we hit an infinite loop. Stop. */
			panic("EEH: MMIO halt (%d) on device:%s\n", rets[0], pci_name(dev));
		}
		goto dn_unlock;
	}

	/*
	 * Now test for an EEH failure.  This is VERY expensive.
	 * Note that the eeh_config_addr may be a parent device
	 * in the case of a device behind a bridge, or it may be
	 * function zero of a multi-function device.
	 * In any case they must share a common PHB.
	 */
	ret = read_slot_reset_state(pdn, rets);

	/* If the call to firmware failed, punt */
	if (ret != 0) {
		printk(KERN_WARNING "EEH: read_slot_reset_state() failed; rc=%d dn=%s\n",
		       ret, dn->full_name);
		__get_cpu_var(false_positives)++;
		rc = 0;
		goto dn_unlock;
	}

	/* If EEH is not supported on this device, punt. */
	if (rets[1] != 1) {
		printk(KERN_WARNING "EEH: event on unsupported device, rc=%d dn=%s\n",
		       ret, dn->full_name);
		__get_cpu_var(false_positives)++;
		rc = 0;
		goto dn_unlock;
	}

	/* If not the kind of error we know about, punt.
	 * (2/4/5 are the frozen/unavailable reset states this code
	 * handles -- TODO confirm exact meanings against PAPR.) */
	if (rets[0] != 2 && rets[0] != 4 && rets[0] != 5) {
		__get_cpu_var(false_positives)++;
		rc = 0;
		goto dn_unlock;
	}

	/* Note that config-io to empty slots may fail;
	 * we recognize empty because they don't have children. */
	if ((rets[0] == 5) && (dn->child == NULL)) {
		__get_cpu_var(false_positives)++;
		rc = 0;
		goto dn_unlock;
	}

	__get_cpu_var(slot_resets)++;

	/* Avoid repeated reports of this failure, including problems
	 * with other functions on this device, and functions under
	 * bridges. */
	eeh_mark_slot (dn, EEH_MODE_ISOLATED);
	spin_unlock_irqrestore(&confirm_error_lock, flags);

	/* Hand off to the asynchronous recovery machinery. */
	eeh_send_failure_event (dn, dev, rets[0], rets[2]);

	/* Most EEH events are due to device driver bugs.  Having
	 * a stack trace will help the device-driver authors figure
	 * out what happened.  So print that out. */
	if (rets[0] != 5) dump_stack();
	return 1;

dn_unlock:
	spin_unlock_irqrestore(&confirm_error_lock, flags);
	return rc;
}
662
663EXPORT_SYMBOL_GPL(eeh_dn_check_failure);
664
665/**
666 * eeh_check_failure - check if all 1's data is due to EEH slot freeze
667 * @token i/o token, should be address in the form 0xA....
668 * @val value, should be all 1's (XXX why do we need this arg??)
669 *
670 * Check for an EEH failure at the given token address. Call this
671 * routine if the result of a read was all 0xff's and you want to
672 * find out if this is due to an EEH slot freeze event. This routine
673 * will query firmware for the EEH status.
674 *
675 * Note this routine is safe to call in an interrupt context.
676 */
677unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
678{
679 unsigned long addr;
680 struct pci_dev *dev;
681 struct device_node *dn;
682
683 /* Finding the phys addr + pci device; this is pretty quick. */
684 addr = eeh_token_to_phys((unsigned long __force) token);
685 dev = pci_get_device_by_addr(addr);
686 if (!dev) {
687 __get_cpu_var(no_device)++;
688 return val;
689 }
690
691 dn = pci_device_to_OF_node(dev);
692 eeh_dn_check_failure (dn, dev);
693
694 pci_dev_put(dev);
695 return val;
696}
697
698EXPORT_SYMBOL(eeh_check_failure);
699
700/* ------------------------------------------------------------- */
701/* The code below deals with error recovery */
702
/** Return negative value if a permanent error, else return
 * a number of milliseconds to wait until the PCI slot is
 * ready to be used.
 */
static int
eeh_slot_availability(struct pci_dn *pdn)
{
	int rets[3];
	int rc = read_slot_reset_state(pdn, rets);

	if (rc != 0)
		return rc;

	if (rets[1] == 0)
		return -1;		/* EEH is not supported */
	if (rets[0] == 0)
		return 0;		/* slot is usable right now */
	if (rets[0] == 5 && rets[2] != 0)
		return rets[2];		/* number of millisecs to wait */

	/* Anything else (including state 5 with no wait time) is
	 * treated as permanently unavailable. */
	return -1;
}
725
/** rtas_pci_slot_reset raises/lowers the pci #RST line
 * @pdn: slot's PCI device node; must not be NULL (BUG_ON otherwise)
 * @state: 1/0 to raise/lower the #RST
 *
 * Clear the EEH-frozen condition on a slot.  This routine
 * asserts the PCI #RST line if the 'state' argument is '1',
 * and drops the #RST line if 'state is '0'.  This routine is
 * safe to call in an interrupt context.
 *
 */

static void
rtas_pci_slot_reset(struct pci_dn *pdn, int state)
{
	int rc;

	BUG_ON (pdn==NULL);

	/* Without a PHB there is no buid to address the firmware call. */
	if (!pdn->phb) {
		printk (KERN_WARNING "EEH: in slot reset, device node %s has no phb\n",
		        pdn->node->full_name);
		return;
	}

	/* ibm,set-slot-reset: config addr, split 64-bit buid, #RST state */
	rc = rtas_call(ibm_set_slot_reset,4,1, NULL,
	               pdn->eeh_config_addr,
	               BUID_HI(pdn->phb->buid),
	               BUID_LO(pdn->phb->buid),
	               state);
	if (rc) {
		printk (KERN_WARNING "EEH: Unable to reset the failed slot, (%d) #RST=%d dn=%s\n",
		        rc, state, pdn->node->full_name);
		return;
	}
}
760
/** rtas_set_slot_reset -- assert the pci #RST line for 1/4 second
 * @pdn -- device node to be reset.
 *
 * Performs the full reset sequence: raise #RST, hold, clear the
 * ISOLATED flag, drop #RST, wait for the bus to settle, then poll
 * firmware until the slot reports ready.
 */

void
rtas_set_slot_reset(struct pci_dn *pdn)
{
	int i, rc;

	/* Raise the reset line. */
	rtas_pci_slot_reset (pdn, 1);

	/* The PCI bus requires that the reset be held high for at least
	 * a 100 milliseconds. We wait a bit longer 'just in case'. */

#define PCI_BUS_RST_HOLD_TIME_MSEC 250
	msleep (PCI_BUS_RST_HOLD_TIME_MSEC);

	/* We might get hit with another EEH freeze as soon as the
	 * pci slot reset line is dropped. Make sure we don't miss
	 * these, and clear the flag now. */
	eeh_clear_slot (pdn->node, EEH_MODE_ISOLATED);

	/* Drop the reset line. */
	rtas_pci_slot_reset (pdn, 0);

	/* After a PCI slot has been reset, the PCI Express spec requires
	 * a 1.5 second idle time for the bus to stabilize, before starting
	 * up traffic. */
#define PCI_BUS_SETTLE_TIME_MSEC 1800
	msleep (PCI_BUS_SETTLE_TIME_MSEC);

	/* Now double check with the firmware to make sure the device is
	 * ready to be used; if not, wait for recovery. */
	for (i=0; i<10; i++) {
		rc = eeh_slot_availability (pdn);
		if (rc <= 0) break;

		/* Positive rc is firmware's suggested wait in ms; add slack. */
		msleep (rc+100);
	}
}
800
801/* ------------------------------------------------------- */
802/** Save and restore of PCI BARs
803 *
804 * Although firmware will set up BARs during boot, it doesn't
805 * set up device BAR's after a device reset, although it will,
806 * if requested, set up bridge configuration. Thus, we need to
807 * configure the PCI devices ourselves.
808 */
809
/**
 * __restore_bars - Restore the Base Address Registers
 * @pdn: device node holding the saved config-space snapshot
 *
 * Loads the PCI configuration space base address registers,
 * the expansion ROM base address, the latency timer, and etc.
 * from the saved values in the device node.
 */
static inline void __restore_bars (struct pci_dn *pdn)
{
	int i;

	if (NULL==pdn->phb) return;
	/* Dwords 4..9 are the six BARs (config offsets 0x10..0x24). */
	for (i=4; i<10; i++) {
		rtas_write_config(pdn, i*4, 4, pdn->config_space[i]);
	}

	/* 12 == Expansion ROM Address */
	rtas_write_config(pdn, 12*4, 4, pdn->config_space[12]);

	/* config_space[] was saved as host-order dwords; these macros map a
	 * byte offset in config space to its swizzled position within the
	 * stored dword (big-endian host assumed -- TODO confirm). */
#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
#define SAVED_BYTE(OFF) (((u8 *)(pdn->config_space))[BYTE_SWAP(OFF)])

	rtas_write_config (pdn, PCI_CACHE_LINE_SIZE, 1,
	           SAVED_BYTE(PCI_CACHE_LINE_SIZE));

	rtas_write_config (pdn, PCI_LATENCY_TIMER, 1,
	           SAVED_BYTE(PCI_LATENCY_TIMER));

	/* max latency, min grant, interrupt pin and line */
	rtas_write_config(pdn, 15*4, 4, pdn->config_space[15]);
}
840
841/**
842 * eeh_restore_bars - restore the PCI config space info
843 *
844 * This routine performs a recursive walk to the children
845 * of this device as well.
846 */
847void eeh_restore_bars(struct pci_dn *pdn)
848{
849 struct device_node *dn;
850 if (!pdn)
851 return;
852
853 if (! pdn->eeh_is_bridge)
854 __restore_bars (pdn);
855
856 dn = pdn->node->child;
857 while (dn) {
858 eeh_restore_bars (PCI_DN(dn));
859 dn = dn->sibling;
860 }
861}
862
863/**
864 * eeh_save_bars - save device bars
865 *
866 * Save the values of the device bars. Unlike the restore
867 * routine, this routine is *not* recursive. This is because
868 * PCI devices are added individuallly; but, for the restore,
869 * an entire slot is reset at a time.
870 */
871static void eeh_save_bars(struct pci_dev * pdev, struct pci_dn *pdn)
872{
873 int i;
874
875 if (!pdev || !pdn )
876 return;
877
878 for (i = 0; i < 16; i++)
879 pci_read_config_dword(pdev, i * 4, &pdn->config_space[i]);
880
881 if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
882 pdn->eeh_is_bridge = 1;
883}
884
/**
 * rtas_configure_bridge - ask firmware to re-configure a PCI bridge
 * @pdn: PCI device node addressing the bridge's slot
 *
 * Invokes the optional ibm,configure-bridge RTAS service; silently
 * does nothing if the platform does not provide it.
 */
void
rtas_configure_bridge(struct pci_dn *pdn)
{
	int token = rtas_token ("ibm,configure-bridge");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return;
	rc = rtas_call(token,3,1, NULL,
	       pdn->eeh_config_addr,
	       BUID_HI(pdn->phb->buid),
	       BUID_LO(pdn->phb->buid));
	if (rc) {
		printk (KERN_WARNING "EEH: Unable to configure device bridge (%d) for %s\n",
		        rc, pdn->node->full_name);
	}
}
902
/* ------------------------------------------------------------- */
/* The code below deals with enabling EEH for devices during the
 * early boot sequence.  EEH must be enabled before any PCI probing
 * can be done.
 */

/* Option value handed to the ibm,set-eeh-option RTAS call to turn
 * EEH on for a slot. */
#define EEH_ENABLE 1

/* Split 64-bit PHB bus unit id, passed down to early_enable_eeh(). */
struct eeh_early_enable_info {
	unsigned int buid_hi;
	unsigned int buid_lo;
};
915
/* Enable eeh for the given device node.
 *
 * Called via traverse_pci_devices() for every node under a PHB.
 * @data points at a struct eeh_early_enable_info carrying the PHB's
 * split buid.  Always returns NULL so the traversal continues.
 */
static void *early_enable_eeh(struct device_node *dn, void *data)
{
	struct eeh_early_enable_info *info = data;
	int ret;
	char *status = get_property(dn, "status", NULL);
	u32 *class_code = (u32 *)get_property(dn, "class-code", NULL);
	u32 *vendor_id = (u32 *)get_property(dn, "vendor-id", NULL);
	u32 *device_id = (u32 *)get_property(dn, "device-id", NULL);
	u32 *regs;
	int enable;
	struct pci_dn *pdn = PCI_DN(dn);

	/* Start from a clean EEH state for this node. */
	pdn->eeh_mode = 0;
	pdn->eeh_check_count = 0;
	pdn->eeh_freeze_count = 0;

	if (status && strcmp(status, "ok") != 0)
		return NULL;	/* ignore devices with bad status */

	/* Ignore bad nodes. */
	if (!class_code || !vendor_id || !device_id)
		return NULL;

	/* There is nothing to check on PCI to ISA bridges */
	if (dn->type && !strcmp(dn->type, "isa")) {
		pdn->eeh_mode |= EEH_MODE_NOCHECK;
		return NULL;
	}

	/*
	 * Now decide if we are going to "Disable" EEH checking
	 * for this device.  We still run with the EEH hardware active,
	 * but we won't be checking for ff's.  This means a driver
	 * could return bad data (very bad!), an interrupt handler could
	 * hang waiting on status bits that won't change, etc.
	 * But there are a few cases like display devices that make sense.
	 */
	enable = 1;	/* i.e. we will do checking */
	if ((*class_code >> 16) == PCI_BASE_CLASS_DISPLAY)
		enable = 0;

	if (!enable)
		pdn->eeh_mode |= EEH_MODE_NOCHECK;

	/* Ok... see if this device supports EEH.  Some do, some don't,
	 * and the only way to find out is to check each and every one. */
	regs = (u32 *)get_property(dn, "reg", NULL);
	if (regs) {
		/* First register entry is addr (00BBSS00) */
		/* Try to enable eeh */
		ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
		                regs[0], info->buid_hi, info->buid_lo,
		                EEH_ENABLE);

		if (ret == 0) {
			eeh_subsystem_enabled = 1;
			pdn->eeh_mode |= EEH_MODE_SUPPORTED;
			pdn->eeh_config_addr = regs[0];
#ifdef DEBUG
			printk(KERN_DEBUG "EEH: %s: eeh enabled\n", dn->full_name);
#endif
		} else {

			/* This device doesn't support EEH, but it may have an
			 * EEH parent, in which case we mark it as supported. */
			if (dn->parent && PCI_DN(dn->parent)
			    && (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) {
				/* Parent supports EEH. */
				pdn->eeh_mode |= EEH_MODE_SUPPORTED;
				pdn->eeh_config_addr = PCI_DN(dn->parent)->eeh_config_addr;
				return NULL;
			}
		}
	} else {
		printk(KERN_WARNING "EEH: %s: unable to get reg property.\n",
		       dn->full_name);
	}

	return NULL;
}
997
/*
 * Initialize EEH by trying to enable it for all of the adapters in the system.
 * As a side effect we can determine here if eeh is supported at all.
 * Note that we leave EEH on so failed config cycles won't cause a machine
 * check.  If a user turns off EEH for a particular adapter they are really
 * telling Linux to ignore errors.  Some hardware (e.g. POWER5) won't
 * grant access to a slot if EEH isn't enabled, and so we always enable
 * EEH for all slots/all devices.
 *
 * The eeh-force-off option disables EEH checking globally, for all slots.
 * Even if force-off is set, the EEH hardware is still enabled, so that
 * newer systems can boot.
 */
void __init eeh_init(void)
{
	struct device_node *phb, *np;
	struct eeh_early_enable_info info;

	spin_lock_init(&confirm_error_lock);
	spin_lock_init(&slot_errbuf_lock);

	/* No /rtas node means no RTAS firmware, hence no EEH. */
	np = of_find_node_by_path("/rtas");
	if (np == NULL)
		return;

	/* Cache the RTAS service tokens used throughout this file. */
	ibm_set_eeh_option = rtas_token("ibm,set-eeh-option");
	ibm_set_slot_reset = rtas_token("ibm,set-slot-reset");
	ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2");
	ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
	ibm_slot_error_detail = rtas_token("ibm,slot-error-detail");

	if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE)
		return;

	/* NOTE(review): rtas_token() is used here to read the value of the
	 * "rtas-error-log-max" /rtas property (the buffer size), not a call
	 * token -- confirm this matches rtas_token()'s implementation. */
	eeh_error_buf_size = rtas_token("rtas-error-log-max");
	if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
		eeh_error_buf_size = 1024;
	}
	if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
		printk(KERN_WARNING "EEH: rtas-error-log-max is bigger than allocated "
		      "buffer ! (%d vs %d)", eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
		eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
	}

	/* Enable EEH for all adapters.  Note that eeh requires buid's */
	for (phb = of_find_node_by_name(NULL, "pci"); phb;
	     phb = of_find_node_by_name(phb, "pci")) {
		unsigned long buid;

		buid = get_phb_buid(phb);
		if (buid == 0 || PCI_DN(phb) == NULL)
			continue;

		info.buid_lo = BUID_LO(buid);
		info.buid_hi = BUID_HI(buid);
		traverse_pci_devices(phb, early_enable_eeh, &info);
	}

	if (eeh_subsystem_enabled)
		printk(KERN_INFO "EEH: PCI Enhanced I/O Error Handling Enabled\n");
	else
		printk(KERN_WARNING "EEH: No capable adapters found\n");
}
1061
1062/**
1063 * eeh_add_device_early - enable EEH for the indicated device_node
1064 * @dn: device node for which to set up EEH
1065 *
1066 * This routine must be used to perform EEH initialization for PCI
1067 * devices that were added after system boot (e.g. hotplug, dlpar).
1068 * This routine must be called before any i/o is performed to the
1069 * adapter (inluding any config-space i/o).
1070 * Whether this actually enables EEH or not for this device depends
1071 * on the CEC architecture, type of the device, on earlier boot
1072 * command-line arguments & etc.
1073 */
1074void eeh_add_device_early(struct device_node *dn)
1075{
1076 struct pci_controller *phb;
1077 struct eeh_early_enable_info info;
1078
1079 if (!dn || !PCI_DN(dn))
1080 return;
1081 phb = PCI_DN(dn)->phb;
1082 if (NULL == phb || 0 == phb->buid) {
1083 printk(KERN_WARNING "EEH: Expected buid but found none for %s\n",
1084 dn->full_name);
1085 dump_stack();
1086 return;
1087 }
1088
1089 info.buid_hi = BUID_HI(phb->buid);
1090 info.buid_lo = BUID_LO(phb->buid);
1091 early_enable_eeh(dn, &info);
1092}
1093EXPORT_SYMBOL_GPL(eeh_add_device_early);
1094
/**
 * eeh_add_device_late - perform EEH initialization for the indicated pci device
 * @dev: pci device for which to set up EEH
 *
 * This routine must be used to complete EEH initialization for PCI
 * devices that were added after system boot (e.g. hotplug, dlpar).
 * Takes a reference on @dev; dropped again by eeh_remove_device().
 */
void eeh_add_device_late(struct pci_dev *dev)
{
	struct device_node *dn;
	struct pci_dn *pdn;

	if (!dev || !eeh_subsystem_enabled)
		return;

#ifdef DEBUG
	printk(KERN_DEBUG "EEH: adding device %s\n", pci_name(dev));
#endif

	pci_dev_get (dev);
	dn = pci_device_to_OF_node(dev);
	/* NOTE(review): assumes dn and PCI_DN(dn) are non-NULL here --
	 * confirm the callers guarantee this. */
	pdn = PCI_DN(dn);
	pdn->pcidev = dev;

	/* Make the device findable by MMIO address, then snapshot its BARs
	 * so they can be restored after a slot reset. */
	pci_addr_cache_insert_device (dev);
	eeh_save_bars(dev, pdn);
}
1122EXPORT_SYMBOL_GPL(eeh_add_device_late);
1123
/**
 * eeh_remove_device - undo EEH setup for the indicated pci device
 * @dev: pci device to be removed
 *
 * This routine should be called when a device is removed from a running
 * system (e.g. by hotplug or dlpar).
 */
void eeh_remove_device(struct pci_dev *dev)
{
	struct device_node *dn;
	if (!dev || !eeh_subsystem_enabled)
		return;

	/* Unregister the device with the EEH/PCI address search system */
#ifdef DEBUG
	printk(KERN_DEBUG "EEH: remove device %s\n", pci_name(dev));
#endif
	pci_addr_cache_remove_device(dev);

	dn = pci_device_to_OF_node(dev);
	PCI_DN(dn)->pcidev = NULL;
	/* Drop the reference taken in eeh_add_device_late(). */
	pci_dev_put (dev);
}
1147EXPORT_SYMBOL_GPL(eeh_remove_device);
1148
/* seq_file show routine for /proc/ppc64/eeh: sums the per-cpu EEH
 * statistics over all cpus and prints them. */
static int proc_eeh_show(struct seq_file *m, void *v)
{
	unsigned int cpu;
	unsigned long ffs = 0, positives = 0, failures = 0;
	unsigned long resets = 0;
	unsigned long no_dev = 0, no_dn = 0, no_cfg = 0, no_check = 0;

	/* Aggregate each per-cpu counter. */
	for_each_cpu(cpu) {
		ffs += per_cpu(total_mmio_ffs, cpu);
		positives += per_cpu(false_positives, cpu);
		failures += per_cpu(ignored_failures, cpu);
		resets += per_cpu(slot_resets, cpu);
		no_dev += per_cpu(no_device, cpu);
		no_dn += per_cpu(no_dn, cpu);
		no_cfg += per_cpu(no_cfg_addr, cpu);
		no_check += per_cpu(ignored_check, cpu);
	}

	if (0 == eeh_subsystem_enabled) {
		seq_printf(m, "EEH Subsystem is globally disabled\n");
		seq_printf(m, "eeh_total_mmio_ffs=%ld\n", ffs);
	} else {
		seq_printf(m, "EEH Subsystem is enabled\n");
		seq_printf(m,
		   "no device=%ld\n"
		   "no device node=%ld\n"
		   "no config address=%ld\n"
		   "check not wanted=%ld\n"
		   "eeh_total_mmio_ffs=%ld\n"
		   "eeh_false_positives=%ld\n"
		   "eeh_ignored_failures=%ld\n"
		   "eeh_slot_resets=%ld\n",
			no_dev, no_dn, no_cfg, no_check,
			ffs, positives, failures, resets);
	}

	return 0;
}
1187
/* open hook: single-shot seq_file wrapping proc_eeh_show() */
static int proc_eeh_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_eeh_show, NULL);
}
1192
/* file operations for the /proc/ppc64/eeh entry */
static struct file_operations proc_eeh_operations = {
	.open      = proc_eeh_open,
	.read      = seq_read,
	.llseek    = seq_lseek,
	.release   = single_release,
};
1199
1200static int __init eeh_init_proc(void)
1201{
1202 struct proc_dir_entry *e;
1203
1204 if (platform_is_pseries()) {
1205 e = create_proc_entry("ppc64/eeh", 0, NULL);
1206 if (e)
1207 e->proc_fops = &proc_eeh_operations;
1208 }
1209
1210 return 0;
1211}
1212__initcall(eeh_init_proc);
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
new file mode 100644
index 000000000000..92497333c2b6
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -0,0 +1,155 @@
1/*
2 * eeh_event.c
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
19 */
20
21#include <linux/list.h>
22#include <linux/pci.h>
23#include <asm/eeh_event.h>
24
25/** Overview:
26 * EEH error states may be detected within exception handlers;
27 * however, the recovery processing needs to occur asynchronously
28 * in a normal kernel context and not an interrupt context.
29 * This pair of routines creates an event and queues it onto a
30 * work-queue, where a worker thread can drive recovery.
31 */
32
/* EEH event workqueue setup. */
static spinlock_t eeh_eventlist_lock = SPIN_LOCK_UNLOCKED;	/* guards eeh_eventlist */
LIST_HEAD(eeh_eventlist);					/* pending struct eeh_event entries */
static void eeh_thread_launcher(void *);
DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);		/* spawns the handler thread */
38
39/**
40 * eeh_panic - call panic() for an eeh event that cannot be handled.
41 * The philosophy of this routine is that it is better to panic and
42 * halt the OS than it is to risk possible data corruption by
43 * oblivious device drivers that don't know better.
44 *
45 * @dev pci device that had an eeh event
46 * @reset_state current reset state of the device slot
47 */
48static void eeh_panic(struct pci_dev *dev, int reset_state)
49{
50 /*
51 * Since the panic_on_oops sysctl is used to halt the system
52 * in light of potential corruption, we can use it here.
53 */
54 if (panic_on_oops) {
55 panic("EEH: MMIO failure (%d) on device:%s\n", reset_state,
56 pci_name(dev));
57 }
58 else {
59 printk(KERN_INFO "EEH: Ignored MMIO failure (%d) on device:%s\n",
60 reset_state, pci_name(dev));
61 }
62}
63
/**
 * eeh_event_handler - dispatch EEH events.  The detection of a frozen
 * slot can occur inside an interrupt, where it can be hard to do
 * anything about it.  The goal of this routine is to pull these
 * detection events out of the context of the interrupt handler, and
 * re-dispatch them for processing at a later time in a normal context.
 *
 * @dummy - unused
 *
 * Runs as the "eehd" kernel thread; drains the event list and exits
 * when it is empty (it is re-spawned by eeh_thread_launcher for the
 * next batch of events).
 */
static int eeh_event_handler(void * dummy)
{
	unsigned long flags;
	struct eeh_event *event;

	daemonize ("eehd");

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Pop one event off the queue, under the list lock. */
		spin_lock_irqsave(&eeh_eventlist_lock, flags);
		event = NULL;
		if (!list_empty(&eeh_eventlist)) {
			event = list_entry(eeh_eventlist.next, struct eeh_event, list);
			list_del(&event->list);
		}
		spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
		if (event == NULL)
			break;

		printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
		       pci_name(event->dev));

		/* No recovery is implemented here yet: halt or ignore,
		 * according to the panic_on_oops policy in eeh_panic(). */
		eeh_panic (event->dev, event->state);

		kfree(event);
	}

	return 0;
}
103
/**
 * eeh_thread_launcher - work-queue callback that spawns the "eehd"
 * event-handler thread (see eeh_event_handler above).
 *
 * @dummy - unused
 */
static void eeh_thread_launcher(void *dummy)
{
	if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
		printk(KERN_ERR "Failed to start EEH daemon\n");
}
114
115/**
116 * eeh_send_failure_event - generate a PCI error event
117 * @dev pci device
118 *
119 * This routine can be called within an interrupt context;
120 * the actual event will be delivered in a normal context
121 * (from a workqueue).
122 */
123int eeh_send_failure_event (struct device_node *dn,
124 struct pci_dev *dev,
125 int state,
126 int time_unavail)
127{
128 unsigned long flags;
129 struct eeh_event *event;
130
131 event = kmalloc(sizeof(*event), GFP_ATOMIC);
132 if (event == NULL) {
133 printk (KERN_ERR "EEH: out of memory, event not handled\n");
134 return 1;
135 }
136
137 if (dev)
138 pci_dev_get(dev);
139
140 event->dn = dn;
141 event->dev = dev;
142 event->state = state;
143 event->time_unavail = time_unavail;
144
145 /* We may or may not be called in an interrupt context */
146 spin_lock_irqsave(&eeh_eventlist_lock, flags);
147 list_add(&event->list, &eeh_eventlist);
148 spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
149
150 schedule_work(&eeh_event_wq);
151
152 return 0;
153}
154
155/********************** END OF FILE ******************************/
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index fcc50bfd43fd..97ba5214417f 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -42,7 +42,6 @@
42#include <asm/machdep.h> 42#include <asm/machdep.h>
43#include <asm/abs_addr.h> 43#include <asm/abs_addr.h>
44#include <asm/pSeries_reconfig.h> 44#include <asm/pSeries_reconfig.h>
45#include <asm/systemcfg.h>
46#include <asm/firmware.h> 45#include <asm/firmware.h>
47#include <asm/tce.h> 46#include <asm/tce.h>
48#include <asm/ppc-pci.h> 47#include <asm/ppc-pci.h>
@@ -582,7 +581,7 @@ void iommu_init_early_pSeries(void)
582 return; 581 return;
583 } 582 }
584 583
585 if (systemcfg->platform & PLATFORM_LPAR) { 584 if (platform_is_lpar()) {
586 if (firmware_has_feature(FW_FEATURE_MULTITCE)) { 585 if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
587 ppc_md.tce_build = tce_buildmulti_pSeriesLP; 586 ppc_md.tce_build = tce_buildmulti_pSeriesLP;
588 ppc_md.tce_free = tce_freemulti_pSeriesLP; 587 ppc_md.tce_free = tce_freemulti_pSeriesLP;
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index c198656a3bb5..999a9620b5ce 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -107,7 +107,6 @@ static void __init pSeries_request_regions(void)
107 107
108void __init pSeries_final_fixup(void) 108void __init pSeries_final_fixup(void)
109{ 109{
110 phbs_remap_io();
111 pSeries_request_regions(); 110 pSeries_request_regions();
112 111
113 pci_addr_cache_build(); 112 pci_addr_cache_build();
@@ -123,7 +122,7 @@ static void fixup_winbond_82c105(struct pci_dev* dev)
123 int i; 122 int i;
124 unsigned int reg; 123 unsigned int reg;
125 124
126 if (!(systemcfg->platform & PLATFORM_PSERIES)) 125 if (!platform_is_pseries())
127 return; 126 return;
128 127
129 printk("Using INTC for W82c105 IDE controller.\n"); 128 printk("Using INTC for W82c105 IDE controller.\n");
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index d7d400339458..d8864164dbe8 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -408,7 +408,7 @@ static int proc_ppc64_create_ofdt(void)
408{ 408{
409 struct proc_dir_entry *ent; 409 struct proc_dir_entry *ent;
410 410
411 if (!(systemcfg->platform & PLATFORM_PSERIES)) 411 if (!platform_is_pseries())
412 return 0; 412 return 0;
413 413
414 ent = create_proc_entry("ppc64/ofdt", S_IWUSR, NULL); 414 ent = create_proc_entry("ppc64/ofdt", S_IWUSR, NULL);
diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c
index e26b0420b6dd..00cf331a1dc4 100644
--- a/arch/powerpc/platforms/pseries/rtasd.c
+++ b/arch/powerpc/platforms/pseries/rtasd.c
@@ -482,10 +482,12 @@ static int __init rtas_init(void)
482{ 482{
483 struct proc_dir_entry *entry; 483 struct proc_dir_entry *entry;
484 484
485 /* No RTAS, only warn if we are on a pSeries box */ 485 if (!platform_is_pseries())
486 return 0;
487
488 /* No RTAS */
486 if (rtas_token("event-scan") == RTAS_UNKNOWN_SERVICE) { 489 if (rtas_token("event-scan") == RTAS_UNKNOWN_SERVICE) {
487 if (systemcfg->platform & PLATFORM_PSERIES) 490 printk(KERN_INFO "rtasd: no event-scan on system\n");
488 printk(KERN_INFO "rtasd: no event-scan on system\n");
489 return 1; 491 return 1;
490 } 492 }
491 493
diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c
new file mode 100644
index 000000000000..2edc947f7c44
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/scanlog.c
@@ -0,0 +1,235 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * scan-log-data driver for PPC64 Todd Inglett <tinglett@vnet.ibm.com>
10 *
11 * When ppc64 hardware fails the service processor dumps internal state
12 * of the system. After a reboot the operating system can access a dump
13 * of this data using this driver. A dump exists if the device-tree
14 * /chosen/ibm,scan-log-data property exists.
15 *
16 * This driver exports /proc/ppc64/scan-log-dump which can be read.
17 * The driver supports only sequential reads.
18 *
19 * The driver looks at a write to the driver for the single word "reset".
20 * If given, the driver will reset the scanlog so the platform can free it.
21 */
22
23#include <linux/module.h>
24#include <linux/types.h>
25#include <linux/errno.h>
26#include <linux/proc_fs.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <asm/uaccess.h>
30#include <asm/rtas.h>
31#include <asm/prom.h>
32
33#define MODULE_VERS "1.0"
34#define MODULE_NAME "scanlog"
35
36/* Status returns from ibm,scan-log-dump */
37#define SCANLOG_COMPLETE 0
38#define SCANLOG_HWERROR -1
39#define SCANLOG_CONTINUE 1
40
41#define DEBUG(A...) do { if (scanlog_debug) printk(KERN_ERR "scanlog: " A); } while (0)
42
43static int scanlog_debug;
44static unsigned int ibm_scan_log_dump; /* RTAS token */
45static struct proc_dir_entry *proc_ppc64_scan_log_dump; /* The proc file */
46
47static ssize_t scanlog_read(struct file *file, char __user *buf,
48 size_t count, loff_t *ppos)
49{
50 struct inode * inode = file->f_dentry->d_inode;
51 struct proc_dir_entry *dp;
52 unsigned int *data;
53 int status;
54 unsigned long len, off;
55 unsigned int wait_time;
56
57 dp = PDE(inode);
58 data = (unsigned int *)dp->data;
59
60 if (!data) {
61 printk(KERN_ERR "scanlog: read failed no data\n");
62 return -EIO;
63 }
64
65 if (count > RTAS_DATA_BUF_SIZE)
66 count = RTAS_DATA_BUF_SIZE;
67
68 if (count < 1024) {
69 /* This is the min supported by this RTAS call. Rather
70 * than do all the buffering we insist the user code handle
71 * larger reads. As long as cp works... :)
72 */
73 printk(KERN_ERR "scanlog: cannot perform a small read (%ld)\n", count);
74 return -EINVAL;
75 }
76
77 if (!access_ok(VERIFY_WRITE, buf, count))
78 return -EFAULT;
79
80 for (;;) {
81 wait_time = 500; /* default wait if no data */
82 spin_lock(&rtas_data_buf_lock);
83 memcpy(rtas_data_buf, data, RTAS_DATA_BUF_SIZE);
84 status = rtas_call(ibm_scan_log_dump, 2, 1, NULL,
85 (u32) __pa(rtas_data_buf), (u32) count);
86 memcpy(data, rtas_data_buf, RTAS_DATA_BUF_SIZE);
87 spin_unlock(&rtas_data_buf_lock);
88
89 DEBUG("status=%d, data[0]=%x, data[1]=%x, data[2]=%x\n",
90 status, data[0], data[1], data[2]);
91 switch (status) {
92 case SCANLOG_COMPLETE:
93 DEBUG("hit eof\n");
94 return 0;
95 case SCANLOG_HWERROR:
96 DEBUG("hardware error reading scan log data\n");
97 return -EIO;
98 case SCANLOG_CONTINUE:
99 /* We may or may not have data yet */
100 len = data[1];
101 off = data[2];
102 if (len > 0) {
103 if (copy_to_user(buf, ((char *)data)+off, len))
104 return -EFAULT;
105 return len;
106 }
107 /* Break to sleep default time */
108 break;
109 default:
110 if (status > 9900 && status <= 9905) {
111 wait_time = rtas_extended_busy_delay_time(status);
112 } else {
113 printk(KERN_ERR "scanlog: unknown error from rtas: %d\n", status);
114 return -EIO;
115 }
116 }
117 /* Apparently no data yet. Wait and try again. */
118 msleep_interruptible(wait_time);
119 }
120 /*NOTREACHED*/
121}
122
123static ssize_t scanlog_write(struct file * file, const char __user * buf,
124 size_t count, loff_t *ppos)
125{
126 char stkbuf[20];
127 int status;
128
129 if (count > 19) count = 19;
130 if (copy_from_user (stkbuf, buf, count)) {
131 return -EFAULT;
132 }
133 stkbuf[count] = 0;
134
135 if (buf) {
136 if (strncmp(stkbuf, "reset", 5) == 0) {
137 DEBUG("reset scanlog\n");
138 status = rtas_call(ibm_scan_log_dump, 2, 1, NULL, 0, 0);
139 DEBUG("rtas returns %d\n", status);
140 } else if (strncmp(stkbuf, "debugon", 7) == 0) {
141 printk(KERN_ERR "scanlog: debug on\n");
142 scanlog_debug = 1;
143 } else if (strncmp(stkbuf, "debugoff", 8) == 0) {
144 printk(KERN_ERR "scanlog: debug off\n");
145 scanlog_debug = 0;
146 }
147 }
148 return count;
149}
150
151static int scanlog_open(struct inode * inode, struct file * file)
152{
153 struct proc_dir_entry *dp = PDE(inode);
154 unsigned int *data = (unsigned int *)dp->data;
155
156 if (!data) {
157 printk(KERN_ERR "scanlog: open failed no data\n");
158 return -EIO;
159 }
160
161 if (data[0] != 0) {
162 /* This imperfect test stops a second copy of the
163 * data (or a reset while data is being copied)
164 */
165 return -EBUSY;
166 }
167
168 data[0] = 0; /* re-init so we restart the scan */
169
170 return 0;
171}
172
173static int scanlog_release(struct inode * inode, struct file * file)
174{
175 struct proc_dir_entry *dp = PDE(inode);
176 unsigned int *data = (unsigned int *)dp->data;
177
178 if (!data) {
179 printk(KERN_ERR "scanlog: release failed no data\n");
180 return -EIO;
181 }
182 data[0] = 0;
183
184 return 0;
185}
186
187struct file_operations scanlog_fops = {
188 .owner = THIS_MODULE,
189 .read = scanlog_read,
190 .write = scanlog_write,
191 .open = scanlog_open,
192 .release = scanlog_release,
193};
194
195int __init scanlog_init(void)
196{
197 struct proc_dir_entry *ent;
198
199 ibm_scan_log_dump = rtas_token("ibm,scan-log-dump");
200 if (ibm_scan_log_dump == RTAS_UNKNOWN_SERVICE) {
201 printk(KERN_ERR "scan-log-dump not implemented on this system\n");
202 return -EIO;
203 }
204
205 ent = create_proc_entry("ppc64/rtas/scan-log-dump", S_IRUSR, NULL);
206 if (ent) {
207 ent->proc_fops = &scanlog_fops;
208 /* Ideally we could allocate a buffer < 4G */
209 ent->data = kmalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
210 if (!ent->data) {
211 printk(KERN_ERR "Failed to allocate a buffer\n");
212 remove_proc_entry("scan-log-dump", ent->parent);
213 return -ENOMEM;
214 }
215 ((unsigned int *)ent->data)[0] = 0;
216 } else {
217 printk(KERN_ERR "Failed to create ppc64/scan-log-dump proc entry\n");
218 return -EIO;
219 }
220 proc_ppc64_scan_log_dump = ent;
221
222 return 0;
223}
224
225void __exit scanlog_cleanup(void)
226{
227 if (proc_ppc64_scan_log_dump) {
228 kfree(proc_ppc64_scan_log_dump->data);
229 remove_proc_entry("scan-log-dump", proc_ppc64_scan_log_dump->parent);
230 }
231}
232
233module_init(scanlog_init);
234module_exit(scanlog_cleanup);
235MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index a093a0d4dd69..e94247c28d42 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -249,7 +249,7 @@ static void __init pSeries_setup_arch(void)
249 ppc_md.idle_loop = default_idle; 249 ppc_md.idle_loop = default_idle;
250 } 250 }
251 251
252 if (systemcfg->platform & PLATFORM_LPAR) 252 if (platform_is_lpar())
253 ppc_md.enable_pmcs = pseries_lpar_enable_pmcs; 253 ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
254 else 254 else
255 ppc_md.enable_pmcs = power4_enable_pmcs; 255 ppc_md.enable_pmcs = power4_enable_pmcs;
@@ -378,7 +378,7 @@ static void __init pSeries_init_early(void)
378 378
379 fw_feature_init(); 379 fw_feature_init();
380 380
381 if (systemcfg->platform & PLATFORM_LPAR) 381 if (platform_is_lpar())
382 hpte_init_lpar(); 382 hpte_init_lpar();
383 else { 383 else {
384 hpte_init_native(); 384 hpte_init_native();
@@ -388,7 +388,7 @@ static void __init pSeries_init_early(void)
388 388
389 generic_find_legacy_serial_ports(&physport, &default_speed); 389 generic_find_legacy_serial_ports(&physport, &default_speed);
390 390
391 if (systemcfg->platform & PLATFORM_LPAR) 391 if (platform_is_lpar())
392 find_udbg_vterm(); 392 find_udbg_vterm();
393 else if (physport) { 393 else if (physport) {
394 /* Map the uart for udbg. */ 394 /* Map the uart for udbg. */
@@ -592,7 +592,7 @@ static void pseries_shared_idle(void)
592 592
593static int pSeries_pci_probe_mode(struct pci_bus *bus) 593static int pSeries_pci_probe_mode(struct pci_bus *bus)
594{ 594{
595 if (systemcfg->platform & PLATFORM_LPAR) 595 if (platform_is_lpar())
596 return PCI_PROBE_DEVTREE; 596 return PCI_PROBE_DEVTREE;
597 return PCI_PROBE_NORMAL; 597 return PCI_PROBE_NORMAL;
598} 598}
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 7a243e8ccd7e..3ba794ca3288 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -46,6 +46,7 @@
46#include <asm/rtas.h> 46#include <asm/rtas.h>
47#include <asm/pSeries_reconfig.h> 47#include <asm/pSeries_reconfig.h>
48#include <asm/mpic.h> 48#include <asm/mpic.h>
49#include <asm/systemcfg.h>
49 50
50#include "plpar_wrappers.h" 51#include "plpar_wrappers.h"
51 52
@@ -96,7 +97,7 @@ int pSeries_cpu_disable(void)
96 int cpu = smp_processor_id(); 97 int cpu = smp_processor_id();
97 98
98 cpu_clear(cpu, cpu_online_map); 99 cpu_clear(cpu, cpu_online_map);
99 systemcfg->processorCount--; 100 _systemcfg->processorCount--;
100 101
101 /*fix boot_cpuid here*/ 102 /*fix boot_cpuid here*/
102 if (cpu == boot_cpuid) 103 if (cpu == boot_cpuid)
@@ -441,7 +442,7 @@ void __init smp_init_pSeries(void)
441 smp_ops->cpu_die = pSeries_cpu_die; 442 smp_ops->cpu_die = pSeries_cpu_die;
442 443
443 /* Processors can be added/removed only on LPAR */ 444 /* Processors can be added/removed only on LPAR */
444 if (systemcfg->platform == PLATFORM_PSERIES_LPAR) 445 if (platform_is_lpar())
445 pSeries_reconfig_notifier_register(&pSeries_smp_nb); 446 pSeries_reconfig_notifier_register(&pSeries_smp_nb);
446#endif 447#endif
447 448
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index c72c86f05cb6..72ac18067ece 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -545,7 +545,9 @@ nextnode:
545 of_node_put(np); 545 of_node_put(np);
546 } 546 }
547 547
548 if (systemcfg->platform == PLATFORM_PSERIES) { 548 if (platform_is_lpar())
549 ops = &pSeriesLP_ops;
550 else {
549#ifdef CONFIG_SMP 551#ifdef CONFIG_SMP
550 for_each_cpu(i) { 552 for_each_cpu(i) {
551 int hard_id; 553 int hard_id;
@@ -561,12 +563,11 @@ nextnode:
561#else 563#else
562 xics_per_cpu[0] = ioremap(intr_base, intr_size); 564 xics_per_cpu[0] = ioremap(intr_base, intr_size);
563#endif /* CONFIG_SMP */ 565#endif /* CONFIG_SMP */
564 } else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
565 ops = &pSeriesLP_ops;
566 } 566 }
567 567
568 xics_8259_pic.enable = i8259_pic.enable; 568 xics_8259_pic.enable = i8259_pic.enable;
569 xics_8259_pic.disable = i8259_pic.disable; 569 xics_8259_pic.disable = i8259_pic.disable;
570 xics_8259_pic.end = i8259_pic.end;
570 for (i = 0; i < 16; ++i) 571 for (i = 0; i < 16; ++i)
571 get_irq_desc(i)->handler = &xics_8259_pic; 572 get_irq_desc(i)->handler = &xics_8259_pic;
572 for (; i < NR_IRQS; ++i) 573 for (; i < NR_IRQS; ++i)
diff --git a/arch/powerpc/sysdev/u3_iommu.c b/arch/powerpc/sysdev/u3_iommu.c
index 543d65909812..f32baf7f4693 100644
--- a/arch/powerpc/sysdev/u3_iommu.c
+++ b/arch/powerpc/sysdev/u3_iommu.c
@@ -226,7 +226,7 @@ static void iommu_table_u3_setup(void)
226 iommu_table_u3.it_busno = 0; 226 iommu_table_u3.it_busno = 0;
227 iommu_table_u3.it_offset = 0; 227 iommu_table_u3.it_offset = 0;
228 /* it_size is in number of entries */ 228 /* it_size is in number of entries */
229 iommu_table_u3.it_size = dart_tablesize / sizeof(u32); 229 iommu_table_u3.it_size = (dart_tablesize / sizeof(u32)) >> DART_PAGE_FACTOR;
230 230
231 /* Initialize the common IOMMU code */ 231 /* Initialize the common IOMMU code */
232 iommu_table_u3.it_base = (unsigned long)dart_vbase; 232 iommu_table_u3.it_base = (unsigned long)dart_vbase;
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index 79a784f0e7a9..b20312e5ed27 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_8xx) += start_8xx.o
8obj-$(CONFIG_6xx) += start_32.o 8obj-$(CONFIG_6xx) += start_32.o
9obj-$(CONFIG_4xx) += start_32.o 9obj-$(CONFIG_4xx) += start_32.o
10obj-$(CONFIG_PPC64) += start_64.o 10obj-$(CONFIG_PPC64) += start_64.o
11obj-y += xmon.o ppc-dis.o ppc-opc.o subr_prf.o setjmp.o 11obj-y += xmon.o ppc-dis.o ppc-opc.o setjmp.o nonstdio.o
diff --git a/arch/powerpc/xmon/nonstdio.c b/arch/powerpc/xmon/nonstdio.c
new file mode 100644
index 000000000000..78765833f4c0
--- /dev/null
+++ b/arch/powerpc/xmon/nonstdio.c
@@ -0,0 +1,134 @@
1/*
2 * Copyright (C) 1996-2005 Paul Mackerras.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/string.h>
10#include <asm/time.h>
11#include "nonstdio.h"
12
13int xmon_putchar(int c)
14{
15 char ch = c;
16
17 if (c == '\n')
18 xmon_putchar('\r');
19 return xmon_write(&ch, 1) == 1? c: -1;
20}
21
22static char line[256];
23static char *lineptr;
24static int lineleft;
25
26int xmon_expect(const char *str, unsigned long timeout)
27{
28 int c;
29 unsigned long t0;
30
31 /* assume 25MHz default timebase if tb_ticks_per_sec not set yet */
32 timeout *= tb_ticks_per_sec? tb_ticks_per_sec: 25000000;
33 t0 = get_tbl();
34 do {
35 lineptr = line;
36 for (;;) {
37 c = xmon_read_poll();
38 if (c == -1) {
39 if (get_tbl() - t0 > timeout)
40 return 0;
41 continue;
42 }
43 if (c == '\n')
44 break;
45 if (c != '\r' && lineptr < &line[sizeof(line) - 1])
46 *lineptr++ = c;
47 }
48 *lineptr = 0;
49 } while (strstr(line, str) == NULL);
50 return 1;
51}
52
53int xmon_getchar(void)
54{
55 int c;
56
57 if (lineleft == 0) {
58 lineptr = line;
59 for (;;) {
60 c = xmon_readchar();
61 if (c == -1 || c == 4)
62 break;
63 if (c == '\r' || c == '\n') {
64 *lineptr++ = '\n';
65 xmon_putchar('\n');
66 break;
67 }
68 switch (c) {
69 case 0177:
70 case '\b':
71 if (lineptr > line) {
72 xmon_putchar('\b');
73 xmon_putchar(' ');
74 xmon_putchar('\b');
75 --lineptr;
76 }
77 break;
78 case 'U' & 0x1F:
79 while (lineptr > line) {
80 xmon_putchar('\b');
81 xmon_putchar(' ');
82 xmon_putchar('\b');
83 --lineptr;
84 }
85 break;
86 default:
87 if (lineptr >= &line[sizeof(line) - 1])
88 xmon_putchar('\a');
89 else {
90 xmon_putchar(c);
91 *lineptr++ = c;
92 }
93 }
94 }
95 lineleft = lineptr - line;
96 lineptr = line;
97 }
98 if (lineleft == 0)
99 return -1;
100 --lineleft;
101 return *lineptr++;
102}
103
104char *xmon_gets(char *str, int nb)
105{
106 char *p;
107 int c;
108
109 for (p = str; p < str + nb - 1; ) {
110 c = xmon_getchar();
111 if (c == -1) {
112 if (p == str)
113 return NULL;
114 break;
115 }
116 *p++ = c;
117 if (c == '\n')
118 break;
119 }
120 *p = 0;
121 return str;
122}
123
124void xmon_printf(const char *format, ...)
125{
126 va_list args;
127 int n;
128 static char xmon_outbuf[1024];
129
130 va_start(args, format);
131 n = vsnprintf(xmon_outbuf, sizeof(xmon_outbuf), format, args);
132 va_end(args);
133 xmon_write(xmon_outbuf, n);
134}
diff --git a/arch/powerpc/xmon/nonstdio.h b/arch/powerpc/xmon/nonstdio.h
index 84211a21c6f4..47cebbd2b1b1 100644
--- a/arch/powerpc/xmon/nonstdio.h
+++ b/arch/powerpc/xmon/nonstdio.h
@@ -1,22 +1,14 @@
1typedef int FILE;
2extern FILE *xmon_stdin, *xmon_stdout;
3#define EOF (-1) 1#define EOF (-1)
4#define stdin xmon_stdin 2
5#define stdout xmon_stdout
6#define printf xmon_printf 3#define printf xmon_printf
7#define fprintf xmon_fprintf
8#define fputs xmon_fputs
9#define fgets xmon_fgets
10#define putchar xmon_putchar 4#define putchar xmon_putchar
11#define getchar xmon_getchar
12#define putc xmon_putc
13#define getc xmon_getc
14#define fopen(n, m) NULL
15#define fflush(f) do {} while (0)
16#define fclose(f) do {} while (0)
17extern char *fgets(char *, int, void *);
18extern void xmon_printf(const char *, ...);
19extern void xmon_fprintf(void *, const char *, ...);
20extern void xmon_sprintf(char *, const char *, ...);
21 5
22#define perror(s) printf("%s: no files!\n", (s)) 6extern int xmon_putchar(int c);
7extern int xmon_getchar(void);
8extern char *xmon_gets(char *, int);
9extern void xmon_printf(const char *, ...);
10extern void xmon_map_scc(void);
11extern int xmon_expect(const char *str, unsigned long timeout);
12extern int xmon_write(void *ptr, int nb);
13extern int xmon_readchar(void);
14extern int xmon_read_poll(void);
diff --git a/arch/powerpc/xmon/setjmp.S b/arch/powerpc/xmon/setjmp.S
index f8e40dfd2bff..96a91f10e2ec 100644
--- a/arch/powerpc/xmon/setjmp.S
+++ b/arch/powerpc/xmon/setjmp.S
@@ -14,61 +14,61 @@
14 14
15_GLOBAL(xmon_setjmp) 15_GLOBAL(xmon_setjmp)
16 mflr r0 16 mflr r0
17 STL r0,0(r3) 17 PPC_STL r0,0(r3)
18 STL r1,SZL(r3) 18 PPC_STL r1,SZL(r3)
19 STL r2,2*SZL(r3) 19 PPC_STL r2,2*SZL(r3)
20 mfcr r0 20 mfcr r0
21 STL r0,3*SZL(r3) 21 PPC_STL r0,3*SZL(r3)
22 STL r13,4*SZL(r3) 22 PPC_STL r13,4*SZL(r3)
23 STL r14,5*SZL(r3) 23 PPC_STL r14,5*SZL(r3)
24 STL r15,6*SZL(r3) 24 PPC_STL r15,6*SZL(r3)
25 STL r16,7*SZL(r3) 25 PPC_STL r16,7*SZL(r3)
26 STL r17,8*SZL(r3) 26 PPC_STL r17,8*SZL(r3)
27 STL r18,9*SZL(r3) 27 PPC_STL r18,9*SZL(r3)
28 STL r19,10*SZL(r3) 28 PPC_STL r19,10*SZL(r3)
29 STL r20,11*SZL(r3) 29 PPC_STL r20,11*SZL(r3)
30 STL r21,12*SZL(r3) 30 PPC_STL r21,12*SZL(r3)
31 STL r22,13*SZL(r3) 31 PPC_STL r22,13*SZL(r3)
32 STL r23,14*SZL(r3) 32 PPC_STL r23,14*SZL(r3)
33 STL r24,15*SZL(r3) 33 PPC_STL r24,15*SZL(r3)
34 STL r25,16*SZL(r3) 34 PPC_STL r25,16*SZL(r3)
35 STL r26,17*SZL(r3) 35 PPC_STL r26,17*SZL(r3)
36 STL r27,18*SZL(r3) 36 PPC_STL r27,18*SZL(r3)
37 STL r28,19*SZL(r3) 37 PPC_STL r28,19*SZL(r3)
38 STL r29,20*SZL(r3) 38 PPC_STL r29,20*SZL(r3)
39 STL r30,21*SZL(r3) 39 PPC_STL r30,21*SZL(r3)
40 STL r31,22*SZL(r3) 40 PPC_STL r31,22*SZL(r3)
41 li r3,0 41 li r3,0
42 blr 42 blr
43 43
44_GLOBAL(xmon_longjmp) 44_GLOBAL(xmon_longjmp)
45 CMPI r4,0 45 PPC_LCMPI r4,0
46 bne 1f 46 bne 1f
47 li r4,1 47 li r4,1
481: LDL r13,4*SZL(r3) 481: PPC_LL r13,4*SZL(r3)
49 LDL r14,5*SZL(r3) 49 PPC_LL r14,5*SZL(r3)
50 LDL r15,6*SZL(r3) 50 PPC_LL r15,6*SZL(r3)
51 LDL r16,7*SZL(r3) 51 PPC_LL r16,7*SZL(r3)
52 LDL r17,8*SZL(r3) 52 PPC_LL r17,8*SZL(r3)
53 LDL r18,9*SZL(r3) 53 PPC_LL r18,9*SZL(r3)
54 LDL r19,10*SZL(r3) 54 PPC_LL r19,10*SZL(r3)
55 LDL r20,11*SZL(r3) 55 PPC_LL r20,11*SZL(r3)
56 LDL r21,12*SZL(r3) 56 PPC_LL r21,12*SZL(r3)
57 LDL r22,13*SZL(r3) 57 PPC_LL r22,13*SZL(r3)
58 LDL r23,14*SZL(r3) 58 PPC_LL r23,14*SZL(r3)
59 LDL r24,15*SZL(r3) 59 PPC_LL r24,15*SZL(r3)
60 LDL r25,16*SZL(r3) 60 PPC_LL r25,16*SZL(r3)
61 LDL r26,17*SZL(r3) 61 PPC_LL r26,17*SZL(r3)
62 LDL r27,18*SZL(r3) 62 PPC_LL r27,18*SZL(r3)
63 LDL r28,19*SZL(r3) 63 PPC_LL r28,19*SZL(r3)
64 LDL r29,20*SZL(r3) 64 PPC_LL r29,20*SZL(r3)
65 LDL r30,21*SZL(r3) 65 PPC_LL r30,21*SZL(r3)
66 LDL r31,22*SZL(r3) 66 PPC_LL r31,22*SZL(r3)
67 LDL r0,3*SZL(r3) 67 PPC_LL r0,3*SZL(r3)
68 mtcrf 0x38,r0 68 mtcrf 0x38,r0
69 LDL r0,0(r3) 69 PPC_LL r0,0(r3)
70 LDL r1,SZL(r3) 70 PPC_LL r1,SZL(r3)
71 LDL r2,2*SZL(r3) 71 PPC_LL r2,2*SZL(r3)
72 mtlr r0 72 mtlr r0
73 mr r3,r4 73 mr r3,r4
74 blr 74 blr
@@ -84,52 +84,52 @@ _GLOBAL(xmon_longjmp)
84 * different ABIs, though). 84 * different ABIs, though).
85 */ 85 */
86_GLOBAL(xmon_save_regs) 86_GLOBAL(xmon_save_regs)
87 STL r0,0*SZL(r3) 87 PPC_STL r0,0*SZL(r3)
88 STL r2,2*SZL(r3) 88 PPC_STL r2,2*SZL(r3)
89 STL r3,3*SZL(r3) 89 PPC_STL r3,3*SZL(r3)
90 STL r4,4*SZL(r3) 90 PPC_STL r4,4*SZL(r3)
91 STL r5,5*SZL(r3) 91 PPC_STL r5,5*SZL(r3)
92 STL r6,6*SZL(r3) 92 PPC_STL r6,6*SZL(r3)
93 STL r7,7*SZL(r3) 93 PPC_STL r7,7*SZL(r3)
94 STL r8,8*SZL(r3) 94 PPC_STL r8,8*SZL(r3)
95 STL r9,9*SZL(r3) 95 PPC_STL r9,9*SZL(r3)
96 STL r10,10*SZL(r3) 96 PPC_STL r10,10*SZL(r3)
97 STL r11,11*SZL(r3) 97 PPC_STL r11,11*SZL(r3)
98 STL r12,12*SZL(r3) 98 PPC_STL r12,12*SZL(r3)
99 STL r13,13*SZL(r3) 99 PPC_STL r13,13*SZL(r3)
100 STL r14,14*SZL(r3) 100 PPC_STL r14,14*SZL(r3)
101 STL r15,15*SZL(r3) 101 PPC_STL r15,15*SZL(r3)
102 STL r16,16*SZL(r3) 102 PPC_STL r16,16*SZL(r3)
103 STL r17,17*SZL(r3) 103 PPC_STL r17,17*SZL(r3)
104 STL r18,18*SZL(r3) 104 PPC_STL r18,18*SZL(r3)
105 STL r19,19*SZL(r3) 105 PPC_STL r19,19*SZL(r3)
106 STL r20,20*SZL(r3) 106 PPC_STL r20,20*SZL(r3)
107 STL r21,21*SZL(r3) 107 PPC_STL r21,21*SZL(r3)
108 STL r22,22*SZL(r3) 108 PPC_STL r22,22*SZL(r3)
109 STL r23,23*SZL(r3) 109 PPC_STL r23,23*SZL(r3)
110 STL r24,24*SZL(r3) 110 PPC_STL r24,24*SZL(r3)
111 STL r25,25*SZL(r3) 111 PPC_STL r25,25*SZL(r3)
112 STL r26,26*SZL(r3) 112 PPC_STL r26,26*SZL(r3)
113 STL r27,27*SZL(r3) 113 PPC_STL r27,27*SZL(r3)
114 STL r28,28*SZL(r3) 114 PPC_STL r28,28*SZL(r3)
115 STL r29,29*SZL(r3) 115 PPC_STL r29,29*SZL(r3)
116 STL r30,30*SZL(r3) 116 PPC_STL r30,30*SZL(r3)
117 STL r31,31*SZL(r3) 117 PPC_STL r31,31*SZL(r3)
118 /* go up one stack frame for SP */ 118 /* go up one stack frame for SP */
119 LDL r4,0(r1) 119 PPC_LL r4,0(r1)
120 STL r4,1*SZL(r3) 120 PPC_STL r4,1*SZL(r3)
121 /* get caller's LR */ 121 /* get caller's LR */
122 LDL r0,LRSAVE(r4) 122 PPC_LL r0,LRSAVE(r4)
123 STL r0,_NIP-STACK_FRAME_OVERHEAD(r3) 123 PPC_STL r0,_NIP-STACK_FRAME_OVERHEAD(r3)
124 STL r0,_LINK-STACK_FRAME_OVERHEAD(r3) 124 PPC_STL r0,_LINK-STACK_FRAME_OVERHEAD(r3)
125 mfmsr r0 125 mfmsr r0
126 STL r0,_MSR-STACK_FRAME_OVERHEAD(r3) 126 PPC_STL r0,_MSR-STACK_FRAME_OVERHEAD(r3)
127 mfctr r0 127 mfctr r0
128 STL r0,_CTR-STACK_FRAME_OVERHEAD(r3) 128 PPC_STL r0,_CTR-STACK_FRAME_OVERHEAD(r3)
129 mfxer r0 129 mfxer r0
130 STL r0,_XER-STACK_FRAME_OVERHEAD(r3) 130 PPC_STL r0,_XER-STACK_FRAME_OVERHEAD(r3)
131 mfcr r0 131 mfcr r0
132 STL r0,_CCR-STACK_FRAME_OVERHEAD(r3) 132 PPC_STL r0,_CCR-STACK_FRAME_OVERHEAD(r3)
133 li r0,0 133 li r0,0
134 STL r0,_TRAP-STACK_FRAME_OVERHEAD(r3) 134 PPC_STL r0,_TRAP-STACK_FRAME_OVERHEAD(r3)
135 blr 135 blr
diff --git a/arch/powerpc/xmon/start_32.c b/arch/powerpc/xmon/start_32.c
index 69b658c0f760..c2464df4217e 100644
--- a/arch/powerpc/xmon/start_32.c
+++ b/arch/powerpc/xmon/start_32.c
@@ -11,7 +11,6 @@
11#include <linux/cuda.h> 11#include <linux/cuda.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/sysrq.h>
15#include <linux/bitops.h> 14#include <linux/bitops.h>
16#include <asm/xmon.h> 15#include <asm/xmon.h>
17#include <asm/prom.h> 16#include <asm/prom.h>
@@ -22,10 +21,11 @@
22#include <asm/processor.h> 21#include <asm/processor.h>
23#include <asm/delay.h> 22#include <asm/delay.h>
24#include <asm/btext.h> 23#include <asm/btext.h>
24#include <asm/time.h>
25#include "nonstdio.h"
25 26
26static volatile unsigned char __iomem *sccc, *sccd; 27static volatile unsigned char __iomem *sccc, *sccd;
27unsigned int TXRDY, RXRDY, DLAB; 28unsigned int TXRDY, RXRDY, DLAB;
28static int xmon_expect(const char *str, unsigned int timeout);
29 29
30static int use_serial; 30static int use_serial;
31static int use_screen; 31static int use_screen;
@@ -33,16 +33,6 @@ static int via_modem;
33static int xmon_use_sccb; 33static int xmon_use_sccb;
34static struct device_node *channel_node; 34static struct device_node *channel_node;
35 35
36#define TB_SPEED 25000000
37
38static inline unsigned int readtb(void)
39{
40 unsigned int ret;
41
42 asm volatile("mftb %0" : "=r" (ret) :);
43 return ret;
44}
45
46void buf_access(void) 36void buf_access(void)
47{ 37{
48 if (DLAB) 38 if (DLAB)
@@ -91,23 +81,7 @@ static unsigned long chrp_find_phys_io_base(void)
91} 81}
92#endif /* CONFIG_PPC_CHRP */ 82#endif /* CONFIG_PPC_CHRP */
93 83
94#ifdef CONFIG_MAGIC_SYSRQ 84void xmon_map_scc(void)
95static void sysrq_handle_xmon(int key, struct pt_regs *regs,
96 struct tty_struct *tty)
97{
98 xmon(regs);
99}
100
101static struct sysrq_key_op sysrq_xmon_op =
102{
103 .handler = sysrq_handle_xmon,
104 .help_msg = "Xmon",
105 .action_msg = "Entering xmon",
106};
107#endif
108
109void
110xmon_map_scc(void)
111{ 85{
112#ifdef CONFIG_PPC_MULTIPLATFORM 86#ifdef CONFIG_PPC_MULTIPLATFORM
113 volatile unsigned char __iomem *base; 87 volatile unsigned char __iomem *base;
@@ -217,8 +191,6 @@ xmon_map_scc(void)
217 RXRDY = 1; 191 RXRDY = 1;
218 DLAB = 0x80; 192 DLAB = 0x80;
219#endif /* platform */ 193#endif /* platform */
220
221 register_sysrq_key('x', &sysrq_xmon_op);
222} 194}
223 195
224static int scc_initialized = 0; 196static int scc_initialized = 0;
@@ -238,8 +210,7 @@ static inline void do_poll_adb(void)
238#endif /* CONFIG_ADB_CUDA */ 210#endif /* CONFIG_ADB_CUDA */
239} 211}
240 212
241int 213int xmon_write(void *ptr, int nb)
242xmon_write(void *handle, void *ptr, int nb)
243{ 214{
244 char *p = ptr; 215 char *p = ptr;
245 int i, c, ct; 216 int i, c, ct;
@@ -311,8 +282,7 @@ static unsigned char xmon_shift_keytab[128] =
311 "\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */ 282 "\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */
312 "\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */ 283 "\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */
313 284
314static int 285static int xmon_get_adb_key(void)
315xmon_get_adb_key(void)
316{ 286{
317 int k, t, on; 287 int k, t, on;
318 288
@@ -350,32 +320,21 @@ xmon_get_adb_key(void)
350} 320}
351#endif /* CONFIG_BOOTX_TEXT */ 321#endif /* CONFIG_BOOTX_TEXT */
352 322
353int 323int xmon_readchar(void)
354xmon_read(void *handle, void *ptr, int nb)
355{ 324{
356 char *p = ptr;
357 int i;
358
359#ifdef CONFIG_BOOTX_TEXT 325#ifdef CONFIG_BOOTX_TEXT
360 if (use_screen) { 326 if (use_screen)
361 for (i = 0; i < nb; ++i) 327 return xmon_get_adb_key();
362 *p++ = xmon_get_adb_key();
363 return i;
364 }
365#endif 328#endif
366 if (!scc_initialized) 329 if (!scc_initialized)
367 xmon_init_scc(); 330 xmon_init_scc();
368 for (i = 0; i < nb; ++i) {
369 while ((*sccc & RXRDY) == 0) 331 while ((*sccc & RXRDY) == 0)
370 do_poll_adb(); 332 do_poll_adb();
371 buf_access(); 333 buf_access();
372 *p++ = *sccd; 334 return *sccd;
373 }
374 return i;
375} 335}
376 336
377int 337int xmon_read_poll(void)
378xmon_read_poll(void)
379{ 338{
380 if ((*sccc & RXRDY) == 0) { 339 if ((*sccc & RXRDY) == 0) {
381 do_poll_adb(); 340 do_poll_adb();
@@ -395,8 +354,7 @@ static unsigned char scc_inittab[] = {
395 3, 0xc1, /* rx enable, 8 bits */ 354 3, 0xc1, /* rx enable, 8 bits */
396}; 355};
397 356
398void 357void xmon_init_scc(void)
399xmon_init_scc(void)
400{ 358{
401 if ( _machine == _MACH_chrp ) 359 if ( _machine == _MACH_chrp )
402 { 360 {
@@ -410,6 +368,7 @@ xmon_init_scc(void)
410 else if ( _machine == _MACH_Pmac ) 368 else if ( _machine == _MACH_Pmac )
411 { 369 {
412 int i, x; 370 int i, x;
371 unsigned long timeout;
413 372
414 if (channel_node != 0) 373 if (channel_node != 0)
415 pmac_call_feature( 374 pmac_call_feature(
@@ -424,8 +383,12 @@ xmon_init_scc(void)
424 PMAC_FTR_MODEM_ENABLE, 383 PMAC_FTR_MODEM_ENABLE,
425 channel_node, 0, 1); 384 channel_node, 0, 1);
426 printk(KERN_INFO "Modem powered up by debugger !\n"); 385 printk(KERN_INFO "Modem powered up by debugger !\n");
427 t0 = readtb(); 386 t0 = get_tbl();
428 while (readtb() - t0 < 3*TB_SPEED) 387 timeout = 3 * tb_ticks_per_sec;
388 if (timeout == 0)
389 /* assume 25MHz if tb_ticks_per_sec not set */
390 timeout = 75000000;
391 while (get_tbl() - t0 < timeout)
429 eieio(); 392 eieio();
430 } 393 }
431 /* use the B channel if requested */ 394 /* use the B channel if requested */
@@ -447,164 +410,19 @@ xmon_init_scc(void)
447 scc_initialized = 1; 410 scc_initialized = 1;
448 if (via_modem) { 411 if (via_modem) {
449 for (;;) { 412 for (;;) {
450 xmon_write(NULL, "ATE1V1\r", 7); 413 xmon_write("ATE1V1\r", 7);
451 if (xmon_expect("OK", 5)) { 414 if (xmon_expect("OK", 5)) {
452 xmon_write(NULL, "ATA\r", 4); 415 xmon_write("ATA\r", 4);
453 if (xmon_expect("CONNECT", 40)) 416 if (xmon_expect("CONNECT", 40))
454 break; 417 break;
455 } 418 }
456 xmon_write(NULL, "+++", 3); 419 xmon_write("+++", 3);
457 xmon_expect("OK", 3); 420 xmon_expect("OK", 3);
458 } 421 }
459 } 422 }
460} 423}
461 424
462void *xmon_stdin; 425void xmon_enter(void)
463void *xmon_stdout;
464void *xmon_stderr;
465
466int xmon_putc(int c, void *f)
467{
468 char ch = c;
469
470 if (c == '\n')
471 xmon_putc('\r', f);
472 return xmon_write(f, &ch, 1) == 1? c: -1;
473}
474
475int xmon_putchar(int c)
476{
477 return xmon_putc(c, xmon_stdout);
478}
479
480int xmon_fputs(char *str, void *f)
481{
482 int n = strlen(str);
483
484 return xmon_write(f, str, n) == n? 0: -1;
485}
486
487int
488xmon_readchar(void)
489{
490 char ch;
491
492 for (;;) {
493 switch (xmon_read(xmon_stdin, &ch, 1)) {
494 case 1:
495 return ch;
496 case -1:
497 xmon_printf("read(stdin) returned -1\r\n", 0, 0);
498 return -1;
499 }
500 }
501}
502
503static char line[256];
504static char *lineptr;
505static int lineleft;
506
507int xmon_expect(const char *str, unsigned int timeout)
508{
509 int c;
510 unsigned int t0;
511
512 timeout *= TB_SPEED;
513 t0 = readtb();
514 do {
515 lineptr = line;
516 for (;;) {
517 c = xmon_read_poll();
518 if (c == -1) {
519 if (readtb() - t0 > timeout)
520 return 0;
521 continue;
522 }
523 if (c == '\n')
524 break;
525 if (c != '\r' && lineptr < &line[sizeof(line) - 1])
526 *lineptr++ = c;
527 }
528 *lineptr = 0;
529 } while (strstr(line, str) == NULL);
530 return 1;
531}
532
533int
534xmon_getchar(void)
535{
536 int c;
537
538 if (lineleft == 0) {
539 lineptr = line;
540 for (;;) {
541 c = xmon_readchar();
542 if (c == -1 || c == 4)
543 break;
544 if (c == '\r' || c == '\n') {
545 *lineptr++ = '\n';
546 xmon_putchar('\n');
547 break;
548 }
549 switch (c) {
550 case 0177:
551 case '\b':
552 if (lineptr > line) {
553 xmon_putchar('\b');
554 xmon_putchar(' ');
555 xmon_putchar('\b');
556 --lineptr;
557 }
558 break;
559 case 'U' & 0x1F:
560 while (lineptr > line) {
561 xmon_putchar('\b');
562 xmon_putchar(' ');
563 xmon_putchar('\b');
564 --lineptr;
565 }
566 break;
567 default:
568 if (lineptr >= &line[sizeof(line) - 1])
569 xmon_putchar('\a');
570 else {
571 xmon_putchar(c);
572 *lineptr++ = c;
573 }
574 }
575 }
576 lineleft = lineptr - line;
577 lineptr = line;
578 }
579 if (lineleft == 0)
580 return -1;
581 --lineleft;
582 return *lineptr++;
583}
584
585char *
586xmon_fgets(char *str, int nb, void *f)
587{
588 char *p;
589 int c;
590
591 for (p = str; p < str + nb - 1; ) {
592 c = xmon_getchar();
593 if (c == -1) {
594 if (p == str)
595 return NULL;
596 break;
597 }
598 *p++ = c;
599 if (c == '\n')
600 break;
601 }
602 *p = 0;
603 return str;
604}
605
606void
607xmon_enter(void)
608{ 426{
609#ifdef CONFIG_ADB_PMU 427#ifdef CONFIG_ADB_PMU
610 if (_machine == _MACH_Pmac) { 428 if (_machine == _MACH_Pmac) {
@@ -613,8 +431,7 @@ xmon_enter(void)
613#endif 431#endif
614} 432}
615 433
616void 434void xmon_leave(void)
617xmon_leave(void)
618{ 435{
619#ifdef CONFIG_ADB_PMU 436#ifdef CONFIG_ADB_PMU
620 if (_machine == _MACH_Pmac) { 437 if (_machine == _MACH_Pmac) {
diff --git a/arch/powerpc/xmon/start_64.c b/arch/powerpc/xmon/start_64.c
index e50c158191e1..712552c4f242 100644
--- a/arch/powerpc/xmon/start_64.c
+++ b/arch/powerpc/xmon/start_64.c
@@ -6,182 +6,29 @@
6 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 */ 8 */
9#include <linux/config.h>
10#include <linux/string.h>
11#include <linux/kernel.h>
12#include <linux/errno.h>
13#include <linux/sysrq.h>
14#include <linux/init.h>
15#include <asm/machdep.h> 9#include <asm/machdep.h>
16#include <asm/io.h>
17#include <asm/page.h>
18#include <asm/prom.h>
19#include <asm/processor.h>
20#include <asm/udbg.h> 10#include <asm/udbg.h>
21#include <asm/system.h>
22#include "nonstdio.h" 11#include "nonstdio.h"
23 12
24#ifdef CONFIG_MAGIC_SYSRQ 13void xmon_map_scc(void)
25
26static void sysrq_handle_xmon(int key, struct pt_regs *pt_regs,
27 struct tty_struct *tty)
28{
29 /* ensure xmon is enabled */
30 xmon_init(1);
31 debugger(pt_regs);
32}
33
34static struct sysrq_key_op sysrq_xmon_op =
35{ 14{
36 .handler = sysrq_handle_xmon,
37 .help_msg = "Xmon",
38 .action_msg = "Entering xmon",
39};
40
41static int __init setup_xmon_sysrq(void)
42{
43 register_sysrq_key('x', &sysrq_xmon_op);
44 return 0;
45} 15}
46__initcall(setup_xmon_sysrq);
47#endif /* CONFIG_MAGIC_SYSRQ */
48 16
49int 17int xmon_write(void *ptr, int nb)
50xmon_write(void *handle, void *ptr, int nb)
51{ 18{
52 return udbg_write(ptr, nb); 19 return udbg_write(ptr, nb);
53} 20}
54 21
55int 22int xmon_readchar(void)
56xmon_read(void *handle, void *ptr, int nb)
57{ 23{
58 return udbg_read(ptr, nb); 24 if (udbg_getc)
25 return udbg_getc();
26 return -1;
59} 27}
60 28
61int 29int xmon_read_poll(void)
62xmon_read_poll(void)
63{ 30{
64 if (udbg_getc_poll) 31 if (udbg_getc_poll)
65 return udbg_getc_poll(); 32 return udbg_getc_poll();
66 return -1; 33 return -1;
67} 34}
68
69FILE *xmon_stdin;
70FILE *xmon_stdout;
71
72int
73xmon_putc(int c, void *f)
74{
75 char ch = c;
76
77 if (c == '\n')
78 xmon_putc('\r', f);
79 return xmon_write(f, &ch, 1) == 1? c: -1;
80}
81
82int
83xmon_putchar(int c)
84{
85 return xmon_putc(c, xmon_stdout);
86}
87
88int
89xmon_fputs(char *str, void *f)
90{
91 int n = strlen(str);
92
93 return xmon_write(f, str, n) == n? 0: -1;
94}
95
96int
97xmon_readchar(void)
98{
99 char ch;
100
101 for (;;) {
102 switch (xmon_read(xmon_stdin, &ch, 1)) {
103 case 1:
104 return ch;
105 case -1:
106 xmon_printf("read(stdin) returned -1\r\n", 0, 0);
107 return -1;
108 }
109 }
110}
111
112static char line[256];
113static char *lineptr;
114static int lineleft;
115
116int
117xmon_getchar(void)
118{
119 int c;
120
121 if (lineleft == 0) {
122 lineptr = line;
123 for (;;) {
124 c = xmon_readchar();
125 if (c == -1 || c == 4)
126 break;
127 if (c == '\r' || c == '\n') {
128 *lineptr++ = '\n';
129 xmon_putchar('\n');
130 break;
131 }
132 switch (c) {
133 case 0177:
134 case '\b':
135 if (lineptr > line) {
136 xmon_putchar('\b');
137 xmon_putchar(' ');
138 xmon_putchar('\b');
139 --lineptr;
140 }
141 break;
142 case 'U' & 0x1F:
143 while (lineptr > line) {
144 xmon_putchar('\b');
145 xmon_putchar(' ');
146 xmon_putchar('\b');
147 --lineptr;
148 }
149 break;
150 default:
151 if (lineptr >= &line[sizeof(line) - 1])
152 xmon_putchar('\a');
153 else {
154 xmon_putchar(c);
155 *lineptr++ = c;
156 }
157 }
158 }
159 lineleft = lineptr - line;
160 lineptr = line;
161 }
162 if (lineleft == 0)
163 return -1;
164 --lineleft;
165 return *lineptr++;
166}
167
168char *
169xmon_fgets(char *str, int nb, void *f)
170{
171 char *p;
172 int c;
173
174 for (p = str; p < str + nb - 1; ) {
175 c = xmon_getchar();
176 if (c == -1) {
177 if (p == str)
178 return NULL;
179 break;
180 }
181 *p++ = c;
182 if (c == '\n')
183 break;
184 }
185 *p = 0;
186 return str;
187}
diff --git a/arch/powerpc/xmon/start_8xx.c b/arch/powerpc/xmon/start_8xx.c
index a48bd594cf61..4c17b0486ad5 100644
--- a/arch/powerpc/xmon/start_8xx.c
+++ b/arch/powerpc/xmon/start_8xx.c
@@ -15,273 +15,30 @@
15#include <asm/8xx_immap.h> 15#include <asm/8xx_immap.h>
16#include <asm/mpc8xx.h> 16#include <asm/mpc8xx.h>
17#include <asm/commproc.h> 17#include <asm/commproc.h>
18#include "nonstdio.h"
18 19
19extern void xmon_printf(const char *fmt, ...);
20extern int xmon_8xx_write(char *str, int nb); 20extern int xmon_8xx_write(char *str, int nb);
21extern int xmon_8xx_read_poll(void); 21extern int xmon_8xx_read_poll(void);
22extern int xmon_8xx_read_char(void); 22extern int xmon_8xx_read_char(void);
23void prom_drawhex(uint);
24void prom_drawstring(const char *str);
25 23
26static int use_screen = 1; /* default */ 24void xmon_map_scc(void)
27
28#define TB_SPEED 25000000
29
30static inline unsigned int readtb(void)
31{
32 unsigned int ret;
33
34 asm volatile("mftb %0" : "=r" (ret) :);
35 return ret;
36}
37
38void buf_access(void)
39{
40}
41
42void
43xmon_map_scc(void)
44{ 25{
45
46 cpmp = (cpm8xx_t *)&(((immap_t *)IMAP_ADDR)->im_cpm); 26 cpmp = (cpm8xx_t *)&(((immap_t *)IMAP_ADDR)->im_cpm);
47 use_screen = 0;
48
49 prom_drawstring("xmon uses serial port\n");
50} 27}
51 28
52static int scc_initialized = 0;
53
54void xmon_init_scc(void); 29void xmon_init_scc(void);
55 30
56int 31int xmon_write(void *ptr, int nb)
57xmon_write(void *handle, void *ptr, int nb)
58{ 32{
59 char *p = ptr;
60 int i, c, ct;
61
62 if (!scc_initialized)
63 xmon_init_scc();
64
65 return(xmon_8xx_write(ptr, nb)); 33 return(xmon_8xx_write(ptr, nb));
66} 34}
67 35
68int xmon_wants_key; 36int xmon_readchar(void)
69
70int
71xmon_read(void *handle, void *ptr, int nb)
72{ 37{
73 char *p = ptr; 38 return xmon_8xx_read_char();
74 int i;
75
76 if (!scc_initialized)
77 xmon_init_scc();
78
79 for (i = 0; i < nb; ++i) {
80 *p++ = xmon_8xx_read_char();
81 }
82 return i;
83} 39}
84 40
85int 41int xmon_read_poll(void)
86xmon_read_poll(void)
87{ 42{
88 return(xmon_8xx_read_poll()); 43 return(xmon_8xx_read_poll());
89} 44}
90
91void
92xmon_init_scc()
93{
94 scc_initialized = 1;
95}
96
97#if 0
98extern int (*prom_entry)(void *);
99
100int
101xmon_exit(void)
102{
103 struct prom_args {
104 char *service;
105 } args;
106
107 for (;;) {
108 args.service = "exit";
109 (*prom_entry)(&args);
110 }
111}
112#endif
113
114void *xmon_stdin;
115void *xmon_stdout;
116void *xmon_stderr;
117
118void
119xmon_init(void)
120{
121}
122
123int
124xmon_putc(int c, void *f)
125{
126 char ch = c;
127
128 if (c == '\n')
129 xmon_putc('\r', f);
130 return xmon_write(f, &ch, 1) == 1? c: -1;
131}
132
133int
134xmon_putchar(int c)
135{
136 return xmon_putc(c, xmon_stdout);
137}
138
139int
140xmon_fputs(char *str, void *f)
141{
142 int n = strlen(str);
143
144 return xmon_write(f, str, n) == n? 0: -1;
145}
146
147int
148xmon_readchar(void)
149{
150 char ch;
151
152 for (;;) {
153 switch (xmon_read(xmon_stdin, &ch, 1)) {
154 case 1:
155 return ch;
156 case -1:
157 xmon_printf("read(stdin) returned -1\r\n", 0, 0);
158 return -1;
159 }
160 }
161}
162
163static char line[256];
164static char *lineptr;
165static int lineleft;
166
167#if 0
168int xmon_expect(const char *str, unsigned int timeout)
169{
170 int c;
171 unsigned int t0;
172
173 timeout *= TB_SPEED;
174 t0 = readtb();
175 do {
176 lineptr = line;
177 for (;;) {
178 c = xmon_read_poll();
179 if (c == -1) {
180 if (readtb() - t0 > timeout)
181 return 0;
182 continue;
183 }
184 if (c == '\n')
185 break;
186 if (c != '\r' && lineptr < &line[sizeof(line) - 1])
187 *lineptr++ = c;
188 }
189 *lineptr = 0;
190 } while (strstr(line, str) == NULL);
191 return 1;
192}
193#endif
194
195int
196xmon_getchar(void)
197{
198 int c;
199
200 if (lineleft == 0) {
201 lineptr = line;
202 for (;;) {
203 c = xmon_readchar();
204 if (c == -1 || c == 4)
205 break;
206 if (c == '\r' || c == '\n') {
207 *lineptr++ = '\n';
208 xmon_putchar('\n');
209 break;
210 }
211 switch (c) {
212 case 0177:
213 case '\b':
214 if (lineptr > line) {
215 xmon_putchar('\b');
216 xmon_putchar(' ');
217 xmon_putchar('\b');
218 --lineptr;
219 }
220 break;
221 case 'U' & 0x1F:
222 while (lineptr > line) {
223 xmon_putchar('\b');
224 xmon_putchar(' ');
225 xmon_putchar('\b');
226 --lineptr;
227 }
228 break;
229 default:
230 if (lineptr >= &line[sizeof(line) - 1])
231 xmon_putchar('\a');
232 else {
233 xmon_putchar(c);
234 *lineptr++ = c;
235 }
236 }
237 }
238 lineleft = lineptr - line;
239 lineptr = line;
240 }
241 if (lineleft == 0)
242 return -1;
243 --lineleft;
244 return *lineptr++;
245}
246
247char *
248xmon_fgets(char *str, int nb, void *f)
249{
250 char *p;
251 int c;
252
253 for (p = str; p < str + nb - 1; ) {
254 c = xmon_getchar();
255 if (c == -1) {
256 if (p == str)
257 return 0;
258 break;
259 }
260 *p++ = c;
261 if (c == '\n')
262 break;
263 }
264 *p = 0;
265 return str;
266}
267
268void
269prom_drawhex(uint val)
270{
271 unsigned char buf[10];
272
273 int i;
274 for (i = 7; i >= 0; i--)
275 {
276 buf[i] = "0123456789abcdef"[val & 0x0f];
277 val >>= 4;
278 }
279 buf[8] = '\0';
280 xmon_fputs(buf, xmon_stdout);
281}
282
283void
284prom_drawstring(const char *str)
285{
286 xmon_fputs(str, xmon_stdout);
287}
diff --git a/arch/powerpc/xmon/subr_prf.c b/arch/powerpc/xmon/subr_prf.c
deleted file mode 100644
index b48738c6dd33..000000000000
--- a/arch/powerpc/xmon/subr_prf.c
+++ /dev/null
@@ -1,54 +0,0 @@
1/*
2 * Written by Cort Dougan to replace the version originally used
3 * by Paul Mackerras, which came from NetBSD and thus had copyright
4 * conflicts with Linux.
5 *
6 * This file makes liberal use of the standard linux utility
7 * routines to reduce the size of the binary. We assume we can
8 * trust some parts of Linux inside the debugger.
9 * -- Cort (cort@cs.nmt.edu)
10 *
11 * Copyright (C) 1999 Cort Dougan.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18
19#include <linux/kernel.h>
20#include <linux/string.h>
21#include <linux/module.h>
22#include <stdarg.h>
23#include "nonstdio.h"
24
25extern int xmon_write(void *, void *, int);
26
27void xmon_vfprintf(void *f, const char *fmt, va_list ap)
28{
29 static char xmon_buf[2048];
30 int n;
31
32 n = vsprintf(xmon_buf, fmt, ap);
33 xmon_write(f, xmon_buf, n);
34}
35
36void xmon_printf(const char *fmt, ...)
37{
38 va_list ap;
39
40 va_start(ap, fmt);
41 xmon_vfprintf(stdout, fmt, ap);
42 va_end(ap);
43}
44EXPORT_SYMBOL(xmon_printf);
45
46void xmon_fprintf(void *f, const char *fmt, ...)
47{
48 va_list ap;
49
50 va_start(ap, fmt);
51 xmon_vfprintf(f, fmt, ap);
52 va_end(ap);
53}
54
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 1124f1146202..cfcb2a56d662 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Routines providing a simple monitor for use on the PowerMac. 2 * Routines providing a simple monitor for use on the PowerMac.
3 * 3 *
4 * Copyright (C) 1996 Paul Mackerras. 4 * Copyright (C) 1996-2005 Paul Mackerras.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
@@ -18,6 +18,7 @@
18#include <linux/kallsyms.h> 18#include <linux/kallsyms.h>
19#include <linux/cpumask.h> 19#include <linux/cpumask.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/sysrq.h>
21 22
22#include <asm/ptrace.h> 23#include <asm/ptrace.h>
23#include <asm/string.h> 24#include <asm/string.h>
@@ -144,15 +145,10 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
144static const char *getvecname(unsigned long vec); 145static const char *getvecname(unsigned long vec);
145 146
146extern int print_insn_powerpc(unsigned long, unsigned long, int); 147extern int print_insn_powerpc(unsigned long, unsigned long, int);
147extern void printf(const char *fmt, ...);
148extern void xmon_vfprintf(void *f, const char *fmt, va_list ap);
149extern int xmon_putc(int c, void *f);
150extern int putchar(int ch);
151 148
152extern void xmon_enter(void); 149extern void xmon_enter(void);
153extern void xmon_leave(void); 150extern void xmon_leave(void);
154 151
155extern int xmon_read_poll(void);
156extern long setjmp(long *); 152extern long setjmp(long *);
157extern void longjmp(long *, long); 153extern void longjmp(long *, long);
158extern void xmon_save_regs(struct pt_regs *); 154extern void xmon_save_regs(struct pt_regs *);
@@ -748,7 +744,6 @@ cmds(struct pt_regs *excp)
748 printf("%x:", smp_processor_id()); 744 printf("%x:", smp_processor_id());
749#endif /* CONFIG_SMP */ 745#endif /* CONFIG_SMP */
750 printf("mon> "); 746 printf("mon> ");
751 fflush(stdout);
752 flush_input(); 747 flush_input();
753 termch = 0; 748 termch = 0;
754 cmd = skipbl(); 749 cmd = skipbl();
@@ -1797,7 +1792,7 @@ memex(void)
1797 for(;;){ 1792 for(;;){
1798 if (!mnoread) 1793 if (!mnoread)
1799 n = mread(adrs, val, size); 1794 n = mread(adrs, val, size);
1800 printf("%.16x%c", adrs, brev? 'r': ' '); 1795 printf(REG"%c", adrs, brev? 'r': ' ');
1801 if (!mnoread) { 1796 if (!mnoread) {
1802 if (brev) 1797 if (brev)
1803 byterev(val, size); 1798 byterev(val, size);
@@ -1976,17 +1971,18 @@ prdump(unsigned long adrs, long ndump)
1976 nr = mread(adrs, temp, r); 1971 nr = mread(adrs, temp, r);
1977 adrs += nr; 1972 adrs += nr;
1978 for (m = 0; m < r; ++m) { 1973 for (m = 0; m < r; ++m) {
1979 if ((m & 7) == 0 && m > 0) 1974 if ((m & (sizeof(long) - 1)) == 0 && m > 0)
1980 putchar(' '); 1975 putchar(' ');
1981 if (m < nr) 1976 if (m < nr)
1982 printf("%.2x", temp[m]); 1977 printf("%.2x", temp[m]);
1983 else 1978 else
1984 printf("%s", fault_chars[fault_type]); 1979 printf("%s", fault_chars[fault_type]);
1985 } 1980 }
1986 if (m <= 8) 1981 for (; m < 16; ++m) {
1987 printf(" "); 1982 if ((m & (sizeof(long) - 1)) == 0)
1988 for (; m < 16; ++m) 1983 putchar(' ');
1989 printf(" "); 1984 printf(" ");
1985 }
1990 printf(" |"); 1986 printf(" |");
1991 for (m = 0; m < r; ++m) { 1987 for (m = 0; m < r; ++m) {
1992 if (m < nr) { 1988 if (m < nr) {
@@ -2151,7 +2147,6 @@ memzcan(void)
2151 ok = mread(a, &v, 1); 2147 ok = mread(a, &v, 1);
2152 if (ok && !ook) { 2148 if (ok && !ook) {
2153 printf("%.8x .. ", a); 2149 printf("%.8x .. ", a);
2154 fflush(stdout);
2155 } else if (!ok && ook) 2150 } else if (!ok && ook)
2156 printf("%.8x\n", a - mskip); 2151 printf("%.8x\n", a - mskip);
2157 ook = ok; 2152 ook = ok;
@@ -2372,7 +2367,7 @@ int
2372inchar(void) 2367inchar(void)
2373{ 2368{
2374 if (lineptr == NULL || *lineptr == 0) { 2369 if (lineptr == NULL || *lineptr == 0) {
2375 if (fgets(line, sizeof(line), stdin) == NULL) { 2370 if (xmon_gets(line, sizeof(line)) == NULL) {
2376 lineptr = NULL; 2371 lineptr = NULL;
2377 return EOF; 2372 return EOF;
2378 } 2373 }
@@ -2526,4 +2521,29 @@ void xmon_init(int enable)
2526 __debugger_dabr_match = NULL; 2521 __debugger_dabr_match = NULL;
2527 __debugger_fault_handler = NULL; 2522 __debugger_fault_handler = NULL;
2528 } 2523 }
2524 xmon_map_scc();
2525}
2526
2527#ifdef CONFIG_MAGIC_SYSRQ
2528static void sysrq_handle_xmon(int key, struct pt_regs *pt_regs,
2529 struct tty_struct *tty)
2530{
2531 /* ensure xmon is enabled */
2532 xmon_init(1);
2533 debugger(pt_regs);
2534}
2535
2536static struct sysrq_key_op sysrq_xmon_op =
2537{
2538 .handler = sysrq_handle_xmon,
2539 .help_msg = "Xmon",
2540 .action_msg = "Entering xmon",
2541};
2542
2543static int __init setup_xmon_sysrq(void)
2544{
2545 register_sysrq_key('x', &sysrq_xmon_op);
2546 return 0;
2529} 2547}
2548__initcall(setup_xmon_sysrq);
2549#endif /* CONFIG_MAGIC_SYSRQ */