aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--arch/powerpc/kernel/Makefile13
-rw-r--r--arch/powerpc/kernel/btext.c2
-rw-r--r--arch/powerpc/kernel/cpu_setup_44x.S15
-rw-r--r--arch/powerpc/kernel/cputable.c149
-rw-r--r--arch/powerpc/kernel/crash.c101
-rw-r--r--arch/powerpc/kernel/dma_64.c19
-rw-r--r--arch/powerpc/kernel/head_44x.S14
-rw-r--r--arch/powerpc/kernel/head_64.S1
-rw-r--r--arch/powerpc/kernel/head_booke.h2
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S20
-rw-r--r--arch/powerpc/kernel/ibmebus.c12
-rw-r--r--arch/powerpc/kernel/iommu.c8
-rw-r--r--arch/powerpc/kernel/isa-bridge.c6
-rw-r--r--arch/powerpc/kernel/legacy_serial.c51
-rw-r--r--arch/powerpc/kernel/lparcfg.c12
-rw-r--r--arch/powerpc/kernel/misc.S73
-rw-r--r--arch/powerpc/kernel/misc_32.S56
-rw-r--r--arch/powerpc/kernel/misc_64.S7
-rw-r--r--arch/powerpc/kernel/module_32.c77
-rw-r--r--arch/powerpc/kernel/module_64.c81
-rw-r--r--arch/powerpc/kernel/of_device.c2
-rw-r--r--arch/powerpc/kernel/of_platform.c33
-rw-r--r--arch/powerpc/kernel/pci-common.c774
-rw-r--r--arch/powerpc/kernel/pci_32.c969
-rw-r--r--arch/powerpc/kernel/pci_64.c444
-rw-r--r--arch/powerpc/kernel/pci_dn.c7
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c1
-rw-r--r--arch/powerpc/kernel/prom.c52
-rw-r--r--arch/powerpc/kernel/prom_init.c149
-rw-r--r--arch/powerpc/kernel/prom_parse.c22
-rw-r--r--arch/powerpc/kernel/rio.c52
-rw-r--r--arch/powerpc/kernel/rtas_pci.c13
-rw-r--r--arch/powerpc/kernel/setup-common.c78
-rw-r--r--arch/powerpc/kernel/signal_32.c3
-rw-r--r--arch/powerpc/kernel/smp.c49
-rw-r--r--arch/powerpc/kernel/systbl_chk.c58
-rw-r--r--arch/powerpc/kernel/systbl_chk.sh33
-rw-r--r--arch/powerpc/kernel/time.c91
-rw-r--r--arch/powerpc/kernel/traps.c87
-rw-r--r--arch/powerpc/kernel/udbg.c7
-rw-r--r--arch/powerpc/kernel/udbg_16550.c43
41 files changed, 1998 insertions, 1688 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index ca51f0cf27ab..58dbfeff9b4d 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5ifeq ($(CONFIG_PPC64),y) 5ifeq ($(CONFIG_PPC64),y)
6EXTRA_CFLAGS += -mno-minimal-toc 6CFLAGS_prom_init.o += -mno-minimal-toc
7endif 7endif
8ifeq ($(CONFIG_PPC32),y) 8ifeq ($(CONFIG_PPC32),y)
9CFLAGS_prom_init.o += -fPIC 9CFLAGS_prom_init.o += -fPIC
@@ -70,6 +70,7 @@ pci64-$(CONFIG_PPC64) += pci_dn.o isa-bridge.o
70obj-$(CONFIG_PCI) += pci_$(CONFIG_WORD_SIZE).o $(pci64-y) \ 70obj-$(CONFIG_PCI) += pci_$(CONFIG_WORD_SIZE).o $(pci64-y) \
71 pci-common.o 71 pci-common.o
72obj-$(CONFIG_PCI_MSI) += msi.o 72obj-$(CONFIG_PCI_MSI) += msi.o
73obj-$(CONFIG_RAPIDIO) += rio.o
73obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o \ 74obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o \
74 machine_kexec_$(CONFIG_WORD_SIZE).o 75 machine_kexec_$(CONFIG_WORD_SIZE).o
75obj-$(CONFIG_AUDIT) += audit.o 76obj-$(CONFIG_AUDIT) += audit.o
@@ -91,3 +92,13 @@ obj-$(CONFIG_PPC64) += $(obj64-y)
91 92
92extra-$(CONFIG_PPC_FPU) += fpu.o 93extra-$(CONFIG_PPC_FPU) += fpu.o
93extra-$(CONFIG_PPC64) += entry_64.o 94extra-$(CONFIG_PPC64) += entry_64.o
95
96extra-y += systbl_chk.i
97$(obj)/systbl.o: systbl_chk
98
99quiet_cmd_systbl_chk = CALL $<
100 cmd_systbl_chk = $(CONFIG_SHELL) $< $(obj)/systbl_chk.i
101
102PHONY += systbl_chk
103systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i
104 $(call cmd,systbl_chk)
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 9c74fdf29eec..80e2eef05b2e 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -236,7 +236,7 @@ int __init btext_find_display(int allow_nonstdout)
236 if (rc == 0 || !allow_nonstdout) 236 if (rc == 0 || !allow_nonstdout)
237 return rc; 237 return rc;
238 238
239 for (np = NULL; (np = of_find_node_by_type(np, "display"));) { 239 for_each_node_by_type(np, "display") {
240 if (of_get_property(np, "linux,opened", NULL)) { 240 if (of_get_property(np, "linux,opened", NULL)) {
241 printk("trying %s ...\n", np->full_name); 241 printk("trying %s ...\n", np->full_name);
242 rc = btext_initialize(np); 242 rc = btext_initialize(np);
diff --git a/arch/powerpc/kernel/cpu_setup_44x.S b/arch/powerpc/kernel/cpu_setup_44x.S
index 8e1812e2f3ee..6250443ab9c9 100644
--- a/arch/powerpc/kernel/cpu_setup_44x.S
+++ b/arch/powerpc/kernel/cpu_setup_44x.S
@@ -23,11 +23,24 @@ _GLOBAL(__setup_cpu_440epx)
23 mflr r4 23 mflr r4
24 bl __init_fpu_44x 24 bl __init_fpu_44x
25 bl __plb_disable_wrp 25 bl __plb_disable_wrp
26 bl __fixup_440A_mcheck
26 mtlr r4 27 mtlr r4
27 blr 28 blr
28_GLOBAL(__setup_cpu_440grx) 29_GLOBAL(__setup_cpu_440grx)
29 b __plb_disable_wrp 30 mflr r4
31 bl __plb_disable_wrp
32 bl __fixup_440A_mcheck
33 mtlr r4
34 blr
35_GLOBAL(__setup_cpu_440gx)
36_GLOBAL(__setup_cpu_440spe)
37 b __fixup_440A_mcheck
30 38
39 /* Temporary fixup for arch/ppc until we kill the whole thing */
40#ifndef CONFIG_PPC_MERGE
41_GLOBAL(__fixup_440A_mcheck)
42 blr
43#endif
31 44
32/* enable APU between CPU and FPU */ 45/* enable APU between CPU and FPU */
33_GLOBAL(__init_fpu_44x) 46_GLOBAL(__init_fpu_44x)
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 9ed351f3c966..a4c2771b5e62 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -33,7 +33,9 @@ EXPORT_SYMBOL(cur_cpu_spec);
33#ifdef CONFIG_PPC32 33#ifdef CONFIG_PPC32
34extern void __setup_cpu_440ep(unsigned long offset, struct cpu_spec* spec); 34extern void __setup_cpu_440ep(unsigned long offset, struct cpu_spec* spec);
35extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec); 35extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec);
36extern void __setup_cpu_440gx(unsigned long offset, struct cpu_spec* spec);
36extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec); 37extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec);
38extern void __setup_cpu_440spe(unsigned long offset, struct cpu_spec* spec);
37extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec); 39extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
38extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec); 40extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
39extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec); 41extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
@@ -85,6 +87,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
85 .pmc_type = PPC_PMC_IBM, 87 .pmc_type = PPC_PMC_IBM,
86 .oprofile_cpu_type = "ppc64/power3", 88 .oprofile_cpu_type = "ppc64/power3",
87 .oprofile_type = PPC_OPROFILE_RS64, 89 .oprofile_type = PPC_OPROFILE_RS64,
90 .machine_check = machine_check_generic,
88 .platform = "power3", 91 .platform = "power3",
89 }, 92 },
90 { /* Power3+ */ 93 { /* Power3+ */
@@ -99,6 +102,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
99 .pmc_type = PPC_PMC_IBM, 102 .pmc_type = PPC_PMC_IBM,
100 .oprofile_cpu_type = "ppc64/power3", 103 .oprofile_cpu_type = "ppc64/power3",
101 .oprofile_type = PPC_OPROFILE_RS64, 104 .oprofile_type = PPC_OPROFILE_RS64,
105 .machine_check = machine_check_generic,
102 .platform = "power3", 106 .platform = "power3",
103 }, 107 },
104 { /* Northstar */ 108 { /* Northstar */
@@ -113,6 +117,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
113 .pmc_type = PPC_PMC_IBM, 117 .pmc_type = PPC_PMC_IBM,
114 .oprofile_cpu_type = "ppc64/rs64", 118 .oprofile_cpu_type = "ppc64/rs64",
115 .oprofile_type = PPC_OPROFILE_RS64, 119 .oprofile_type = PPC_OPROFILE_RS64,
120 .machine_check = machine_check_generic,
116 .platform = "rs64", 121 .platform = "rs64",
117 }, 122 },
118 { /* Pulsar */ 123 { /* Pulsar */
@@ -127,6 +132,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
127 .pmc_type = PPC_PMC_IBM, 132 .pmc_type = PPC_PMC_IBM,
128 .oprofile_cpu_type = "ppc64/rs64", 133 .oprofile_cpu_type = "ppc64/rs64",
129 .oprofile_type = PPC_OPROFILE_RS64, 134 .oprofile_type = PPC_OPROFILE_RS64,
135 .machine_check = machine_check_generic,
130 .platform = "rs64", 136 .platform = "rs64",
131 }, 137 },
132 { /* I-star */ 138 { /* I-star */
@@ -141,6 +147,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
141 .pmc_type = PPC_PMC_IBM, 147 .pmc_type = PPC_PMC_IBM,
142 .oprofile_cpu_type = "ppc64/rs64", 148 .oprofile_cpu_type = "ppc64/rs64",
143 .oprofile_type = PPC_OPROFILE_RS64, 149 .oprofile_type = PPC_OPROFILE_RS64,
150 .machine_check = machine_check_generic,
144 .platform = "rs64", 151 .platform = "rs64",
145 }, 152 },
146 { /* S-star */ 153 { /* S-star */
@@ -155,6 +162,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
155 .pmc_type = PPC_PMC_IBM, 162 .pmc_type = PPC_PMC_IBM,
156 .oprofile_cpu_type = "ppc64/rs64", 163 .oprofile_cpu_type = "ppc64/rs64",
157 .oprofile_type = PPC_OPROFILE_RS64, 164 .oprofile_type = PPC_OPROFILE_RS64,
165 .machine_check = machine_check_generic,
158 .platform = "rs64", 166 .platform = "rs64",
159 }, 167 },
160 { /* Power4 */ 168 { /* Power4 */
@@ -169,6 +177,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
169 .pmc_type = PPC_PMC_IBM, 177 .pmc_type = PPC_PMC_IBM,
170 .oprofile_cpu_type = "ppc64/power4", 178 .oprofile_cpu_type = "ppc64/power4",
171 .oprofile_type = PPC_OPROFILE_POWER4, 179 .oprofile_type = PPC_OPROFILE_POWER4,
180 .machine_check = machine_check_generic,
172 .platform = "power4", 181 .platform = "power4",
173 }, 182 },
174 { /* Power4+ */ 183 { /* Power4+ */
@@ -183,6 +192,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
183 .pmc_type = PPC_PMC_IBM, 192 .pmc_type = PPC_PMC_IBM,
184 .oprofile_cpu_type = "ppc64/power4", 193 .oprofile_cpu_type = "ppc64/power4",
185 .oprofile_type = PPC_OPROFILE_POWER4, 194 .oprofile_type = PPC_OPROFILE_POWER4,
195 .machine_check = machine_check_generic,
186 .platform = "power4", 196 .platform = "power4",
187 }, 197 },
188 { /* PPC970 */ 198 { /* PPC970 */
@@ -200,6 +210,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
200 .cpu_restore = __restore_cpu_ppc970, 210 .cpu_restore = __restore_cpu_ppc970,
201 .oprofile_cpu_type = "ppc64/970", 211 .oprofile_cpu_type = "ppc64/970",
202 .oprofile_type = PPC_OPROFILE_POWER4, 212 .oprofile_type = PPC_OPROFILE_POWER4,
213 .machine_check = machine_check_generic,
203 .platform = "ppc970", 214 .platform = "ppc970",
204 }, 215 },
205 { /* PPC970FX */ 216 { /* PPC970FX */
@@ -217,6 +228,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
217 .cpu_restore = __restore_cpu_ppc970, 228 .cpu_restore = __restore_cpu_ppc970,
218 .oprofile_cpu_type = "ppc64/970", 229 .oprofile_cpu_type = "ppc64/970",
219 .oprofile_type = PPC_OPROFILE_POWER4, 230 .oprofile_type = PPC_OPROFILE_POWER4,
231 .machine_check = machine_check_generic,
220 .platform = "ppc970", 232 .platform = "ppc970",
221 }, 233 },
222 { /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */ 234 { /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */
@@ -234,6 +246,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
234 .cpu_restore = __restore_cpu_ppc970, 246 .cpu_restore = __restore_cpu_ppc970,
235 .oprofile_cpu_type = "ppc64/970MP", 247 .oprofile_cpu_type = "ppc64/970MP",
236 .oprofile_type = PPC_OPROFILE_POWER4, 248 .oprofile_type = PPC_OPROFILE_POWER4,
249 .machine_check = machine_check_generic,
237 .platform = "ppc970", 250 .platform = "ppc970",
238 }, 251 },
239 { /* PPC970MP */ 252 { /* PPC970MP */
@@ -251,6 +264,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
251 .cpu_restore = __restore_cpu_ppc970, 264 .cpu_restore = __restore_cpu_ppc970,
252 .oprofile_cpu_type = "ppc64/970MP", 265 .oprofile_cpu_type = "ppc64/970MP",
253 .oprofile_type = PPC_OPROFILE_POWER4, 266 .oprofile_type = PPC_OPROFILE_POWER4,
267 .machine_check = machine_check_generic,
254 .platform = "ppc970", 268 .platform = "ppc970",
255 }, 269 },
256 { /* PPC970GX */ 270 { /* PPC970GX */
@@ -267,6 +281,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
267 .cpu_setup = __setup_cpu_ppc970, 281 .cpu_setup = __setup_cpu_ppc970,
268 .oprofile_cpu_type = "ppc64/970", 282 .oprofile_cpu_type = "ppc64/970",
269 .oprofile_type = PPC_OPROFILE_POWER4, 283 .oprofile_type = PPC_OPROFILE_POWER4,
284 .machine_check = machine_check_generic,
270 .platform = "ppc970", 285 .platform = "ppc970",
271 }, 286 },
272 { /* Power5 GR */ 287 { /* Power5 GR */
@@ -286,6 +301,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
286 */ 301 */
287 .oprofile_mmcra_sihv = MMCRA_SIHV, 302 .oprofile_mmcra_sihv = MMCRA_SIHV,
288 .oprofile_mmcra_sipr = MMCRA_SIPR, 303 .oprofile_mmcra_sipr = MMCRA_SIPR,
304 .machine_check = machine_check_generic,
289 .platform = "power5", 305 .platform = "power5",
290 }, 306 },
291 { /* Power5++ */ 307 { /* Power5++ */
@@ -301,6 +317,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
301 .oprofile_type = PPC_OPROFILE_POWER4, 317 .oprofile_type = PPC_OPROFILE_POWER4,
302 .oprofile_mmcra_sihv = MMCRA_SIHV, 318 .oprofile_mmcra_sihv = MMCRA_SIHV,
303 .oprofile_mmcra_sipr = MMCRA_SIPR, 319 .oprofile_mmcra_sipr = MMCRA_SIPR,
320 .machine_check = machine_check_generic,
304 .platform = "power5+", 321 .platform = "power5+",
305 }, 322 },
306 { /* Power5 GS */ 323 { /* Power5 GS */
@@ -317,6 +334,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
317 .oprofile_type = PPC_OPROFILE_POWER4, 334 .oprofile_type = PPC_OPROFILE_POWER4,
318 .oprofile_mmcra_sihv = MMCRA_SIHV, 335 .oprofile_mmcra_sihv = MMCRA_SIHV,
319 .oprofile_mmcra_sipr = MMCRA_SIPR, 336 .oprofile_mmcra_sipr = MMCRA_SIPR,
337 .machine_check = machine_check_generic,
320 .platform = "power5+", 338 .platform = "power5+",
321 }, 339 },
322 { /* POWER6 in P5+ mode; 2.04-compliant processor */ 340 { /* POWER6 in P5+ mode; 2.04-compliant processor */
@@ -327,6 +345,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
327 .cpu_user_features = COMMON_USER_POWER5_PLUS, 345 .cpu_user_features = COMMON_USER_POWER5_PLUS,
328 .icache_bsize = 128, 346 .icache_bsize = 128,
329 .dcache_bsize = 128, 347 .dcache_bsize = 128,
348 .machine_check = machine_check_generic,
330 .platform = "power5+", 349 .platform = "power5+",
331 }, 350 },
332 { /* Power6 */ 351 { /* Power6 */
@@ -346,6 +365,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
346 .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR, 365 .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
347 .oprofile_mmcra_clear = POWER6_MMCRA_THRM | 366 .oprofile_mmcra_clear = POWER6_MMCRA_THRM |
348 POWER6_MMCRA_OTHER, 367 POWER6_MMCRA_OTHER,
368 .machine_check = machine_check_generic,
349 .platform = "power6x", 369 .platform = "power6x",
350 }, 370 },
351 { /* 2.05-compliant processor, i.e. Power6 "architected" mode */ 371 { /* 2.05-compliant processor, i.e. Power6 "architected" mode */
@@ -356,6 +376,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
356 .cpu_user_features = COMMON_USER_POWER6, 376 .cpu_user_features = COMMON_USER_POWER6,
357 .icache_bsize = 128, 377 .icache_bsize = 128,
358 .dcache_bsize = 128, 378 .dcache_bsize = 128,
379 .machine_check = machine_check_generic,
359 .platform = "power6", 380 .platform = "power6",
360 }, 381 },
361 { /* Cell Broadband Engine */ 382 { /* Cell Broadband Engine */
@@ -372,6 +393,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
372 .pmc_type = PPC_PMC_IBM, 393 .pmc_type = PPC_PMC_IBM,
373 .oprofile_cpu_type = "ppc64/cell-be", 394 .oprofile_cpu_type = "ppc64/cell-be",
374 .oprofile_type = PPC_OPROFILE_CELL, 395 .oprofile_type = PPC_OPROFILE_CELL,
396 .machine_check = machine_check_generic,
375 .platform = "ppc-cell-be", 397 .platform = "ppc-cell-be",
376 }, 398 },
377 { /* PA Semi PA6T */ 399 { /* PA Semi PA6T */
@@ -388,6 +410,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
388 .cpu_restore = __restore_cpu_pa6t, 410 .cpu_restore = __restore_cpu_pa6t,
389 .oprofile_cpu_type = "ppc64/pa6t", 411 .oprofile_cpu_type = "ppc64/pa6t",
390 .oprofile_type = PPC_OPROFILE_PA6T, 412 .oprofile_type = PPC_OPROFILE_PA6T,
413 .machine_check = machine_check_generic,
391 .platform = "pa6t", 414 .platform = "pa6t",
392 }, 415 },
393 { /* default match */ 416 { /* default match */
@@ -400,6 +423,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
400 .dcache_bsize = 128, 423 .dcache_bsize = 128,
401 .num_pmcs = 6, 424 .num_pmcs = 6,
402 .pmc_type = PPC_PMC_IBM, 425 .pmc_type = PPC_PMC_IBM,
426 .machine_check = machine_check_generic,
403 .platform = "power4", 427 .platform = "power4",
404 } 428 }
405#endif /* CONFIG_PPC64 */ 429#endif /* CONFIG_PPC64 */
@@ -414,6 +438,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
414 PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB, 438 PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB,
415 .icache_bsize = 32, 439 .icache_bsize = 32,
416 .dcache_bsize = 32, 440 .dcache_bsize = 32,
441 .machine_check = machine_check_generic,
417 .platform = "ppc601", 442 .platform = "ppc601",
418 }, 443 },
419 { /* 603 */ 444 { /* 603 */
@@ -425,6 +450,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
425 .icache_bsize = 32, 450 .icache_bsize = 32,
426 .dcache_bsize = 32, 451 .dcache_bsize = 32,
427 .cpu_setup = __setup_cpu_603, 452 .cpu_setup = __setup_cpu_603,
453 .machine_check = machine_check_generic,
428 .platform = "ppc603", 454 .platform = "ppc603",
429 }, 455 },
430 { /* 603e */ 456 { /* 603e */
@@ -436,6 +462,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
436 .icache_bsize = 32, 462 .icache_bsize = 32,
437 .dcache_bsize = 32, 463 .dcache_bsize = 32,
438 .cpu_setup = __setup_cpu_603, 464 .cpu_setup = __setup_cpu_603,
465 .machine_check = machine_check_generic,
439 .platform = "ppc603", 466 .platform = "ppc603",
440 }, 467 },
441 { /* 603ev */ 468 { /* 603ev */
@@ -447,6 +474,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
447 .icache_bsize = 32, 474 .icache_bsize = 32,
448 .dcache_bsize = 32, 475 .dcache_bsize = 32,
449 .cpu_setup = __setup_cpu_603, 476 .cpu_setup = __setup_cpu_603,
477 .machine_check = machine_check_generic,
450 .platform = "ppc603", 478 .platform = "ppc603",
451 }, 479 },
452 { /* 604 */ 480 { /* 604 */
@@ -459,6 +487,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
459 .dcache_bsize = 32, 487 .dcache_bsize = 32,
460 .num_pmcs = 2, 488 .num_pmcs = 2,
461 .cpu_setup = __setup_cpu_604, 489 .cpu_setup = __setup_cpu_604,
490 .machine_check = machine_check_generic,
462 .platform = "ppc604", 491 .platform = "ppc604",
463 }, 492 },
464 { /* 604e */ 493 { /* 604e */
@@ -471,6 +500,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
471 .dcache_bsize = 32, 500 .dcache_bsize = 32,
472 .num_pmcs = 4, 501 .num_pmcs = 4,
473 .cpu_setup = __setup_cpu_604, 502 .cpu_setup = __setup_cpu_604,
503 .machine_check = machine_check_generic,
474 .platform = "ppc604", 504 .platform = "ppc604",
475 }, 505 },
476 { /* 604r */ 506 { /* 604r */
@@ -483,6 +513,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
483 .dcache_bsize = 32, 513 .dcache_bsize = 32,
484 .num_pmcs = 4, 514 .num_pmcs = 4,
485 .cpu_setup = __setup_cpu_604, 515 .cpu_setup = __setup_cpu_604,
516 .machine_check = machine_check_generic,
486 .platform = "ppc604", 517 .platform = "ppc604",
487 }, 518 },
488 { /* 604ev */ 519 { /* 604ev */
@@ -495,6 +526,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
495 .dcache_bsize = 32, 526 .dcache_bsize = 32,
496 .num_pmcs = 4, 527 .num_pmcs = 4,
497 .cpu_setup = __setup_cpu_604, 528 .cpu_setup = __setup_cpu_604,
529 .machine_check = machine_check_generic,
498 .platform = "ppc604", 530 .platform = "ppc604",
499 }, 531 },
500 { /* 740/750 (0x4202, don't support TAU ?) */ 532 { /* 740/750 (0x4202, don't support TAU ?) */
@@ -507,6 +539,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
507 .dcache_bsize = 32, 539 .dcache_bsize = 32,
508 .num_pmcs = 4, 540 .num_pmcs = 4,
509 .cpu_setup = __setup_cpu_750, 541 .cpu_setup = __setup_cpu_750,
542 .machine_check = machine_check_generic,
510 .platform = "ppc750", 543 .platform = "ppc750",
511 }, 544 },
512 { /* 750CX (80100 and 8010x?) */ 545 { /* 750CX (80100 and 8010x?) */
@@ -519,6 +552,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
519 .dcache_bsize = 32, 552 .dcache_bsize = 32,
520 .num_pmcs = 4, 553 .num_pmcs = 4,
521 .cpu_setup = __setup_cpu_750cx, 554 .cpu_setup = __setup_cpu_750cx,
555 .machine_check = machine_check_generic,
522 .platform = "ppc750", 556 .platform = "ppc750",
523 }, 557 },
524 { /* 750CX (82201 and 82202) */ 558 { /* 750CX (82201 and 82202) */
@@ -531,6 +565,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
531 .dcache_bsize = 32, 565 .dcache_bsize = 32,
532 .num_pmcs = 4, 566 .num_pmcs = 4,
533 .cpu_setup = __setup_cpu_750cx, 567 .cpu_setup = __setup_cpu_750cx,
568 .machine_check = machine_check_generic,
534 .platform = "ppc750", 569 .platform = "ppc750",
535 }, 570 },
536 { /* 750CXe (82214) */ 571 { /* 750CXe (82214) */
@@ -543,6 +578,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
543 .dcache_bsize = 32, 578 .dcache_bsize = 32,
544 .num_pmcs = 4, 579 .num_pmcs = 4,
545 .cpu_setup = __setup_cpu_750cx, 580 .cpu_setup = __setup_cpu_750cx,
581 .machine_check = machine_check_generic,
546 .platform = "ppc750", 582 .platform = "ppc750",
547 }, 583 },
548 { /* 750CXe "Gekko" (83214) */ 584 { /* 750CXe "Gekko" (83214) */
@@ -555,6 +591,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
555 .dcache_bsize = 32, 591 .dcache_bsize = 32,
556 .num_pmcs = 4, 592 .num_pmcs = 4,
557 .cpu_setup = __setup_cpu_750cx, 593 .cpu_setup = __setup_cpu_750cx,
594 .machine_check = machine_check_generic,
558 .platform = "ppc750", 595 .platform = "ppc750",
559 }, 596 },
560 { /* 750CL */ 597 { /* 750CL */
@@ -567,6 +604,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
567 .dcache_bsize = 32, 604 .dcache_bsize = 32,
568 .num_pmcs = 4, 605 .num_pmcs = 4,
569 .cpu_setup = __setup_cpu_750, 606 .cpu_setup = __setup_cpu_750,
607 .machine_check = machine_check_generic,
570 .platform = "ppc750", 608 .platform = "ppc750",
571 }, 609 },
572 { /* 745/755 */ 610 { /* 745/755 */
@@ -579,6 +617,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
579 .dcache_bsize = 32, 617 .dcache_bsize = 32,
580 .num_pmcs = 4, 618 .num_pmcs = 4,
581 .cpu_setup = __setup_cpu_750, 619 .cpu_setup = __setup_cpu_750,
620 .machine_check = machine_check_generic,
582 .platform = "ppc750", 621 .platform = "ppc750",
583 }, 622 },
584 { /* 750FX rev 1.x */ 623 { /* 750FX rev 1.x */
@@ -591,6 +630,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
591 .dcache_bsize = 32, 630 .dcache_bsize = 32,
592 .num_pmcs = 4, 631 .num_pmcs = 4,
593 .cpu_setup = __setup_cpu_750, 632 .cpu_setup = __setup_cpu_750,
633 .machine_check = machine_check_generic,
594 .platform = "ppc750", 634 .platform = "ppc750",
595 }, 635 },
596 { /* 750FX rev 2.0 must disable HID0[DPM] */ 636 { /* 750FX rev 2.0 must disable HID0[DPM] */
@@ -603,6 +643,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
603 .dcache_bsize = 32, 643 .dcache_bsize = 32,
604 .num_pmcs = 4, 644 .num_pmcs = 4,
605 .cpu_setup = __setup_cpu_750, 645 .cpu_setup = __setup_cpu_750,
646 .machine_check = machine_check_generic,
606 .platform = "ppc750", 647 .platform = "ppc750",
607 }, 648 },
608 { /* 750FX (All revs except 2.0) */ 649 { /* 750FX (All revs except 2.0) */
@@ -615,6 +656,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
615 .dcache_bsize = 32, 656 .dcache_bsize = 32,
616 .num_pmcs = 4, 657 .num_pmcs = 4,
617 .cpu_setup = __setup_cpu_750fx, 658 .cpu_setup = __setup_cpu_750fx,
659 .machine_check = machine_check_generic,
618 .platform = "ppc750", 660 .platform = "ppc750",
619 }, 661 },
620 { /* 750GX */ 662 { /* 750GX */
@@ -627,6 +669,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
627 .dcache_bsize = 32, 669 .dcache_bsize = 32,
628 .num_pmcs = 4, 670 .num_pmcs = 4,
629 .cpu_setup = __setup_cpu_750fx, 671 .cpu_setup = __setup_cpu_750fx,
672 .machine_check = machine_check_generic,
630 .platform = "ppc750", 673 .platform = "ppc750",
631 }, 674 },
632 { /* 740/750 (L2CR bit need fixup for 740) */ 675 { /* 740/750 (L2CR bit need fixup for 740) */
@@ -639,6 +682,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
639 .dcache_bsize = 32, 682 .dcache_bsize = 32,
640 .num_pmcs = 4, 683 .num_pmcs = 4,
641 .cpu_setup = __setup_cpu_750, 684 .cpu_setup = __setup_cpu_750,
685 .machine_check = machine_check_generic,
642 .platform = "ppc750", 686 .platform = "ppc750",
643 }, 687 },
644 { /* 7400 rev 1.1 ? (no TAU) */ 688 { /* 7400 rev 1.1 ? (no TAU) */
@@ -652,6 +696,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
652 .dcache_bsize = 32, 696 .dcache_bsize = 32,
653 .num_pmcs = 4, 697 .num_pmcs = 4,
654 .cpu_setup = __setup_cpu_7400, 698 .cpu_setup = __setup_cpu_7400,
699 .machine_check = machine_check_generic,
655 .platform = "ppc7400", 700 .platform = "ppc7400",
656 }, 701 },
657 { /* 7400 */ 702 { /* 7400 */
@@ -665,6 +710,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
665 .dcache_bsize = 32, 710 .dcache_bsize = 32,
666 .num_pmcs = 4, 711 .num_pmcs = 4,
667 .cpu_setup = __setup_cpu_7400, 712 .cpu_setup = __setup_cpu_7400,
713 .machine_check = machine_check_generic,
668 .platform = "ppc7400", 714 .platform = "ppc7400",
669 }, 715 },
670 { /* 7410 */ 716 { /* 7410 */
@@ -678,6 +724,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
678 .dcache_bsize = 32, 724 .dcache_bsize = 32,
679 .num_pmcs = 4, 725 .num_pmcs = 4,
680 .cpu_setup = __setup_cpu_7410, 726 .cpu_setup = __setup_cpu_7410,
727 .machine_check = machine_check_generic,
681 .platform = "ppc7400", 728 .platform = "ppc7400",
682 }, 729 },
683 { /* 7450 2.0 - no doze/nap */ 730 { /* 7450 2.0 - no doze/nap */
@@ -693,6 +740,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
693 .cpu_setup = __setup_cpu_745x, 740 .cpu_setup = __setup_cpu_745x,
694 .oprofile_cpu_type = "ppc/7450", 741 .oprofile_cpu_type = "ppc/7450",
695 .oprofile_type = PPC_OPROFILE_G4, 742 .oprofile_type = PPC_OPROFILE_G4,
743 .machine_check = machine_check_generic,
696 .platform = "ppc7450", 744 .platform = "ppc7450",
697 }, 745 },
698 { /* 7450 2.1 */ 746 { /* 7450 2.1 */
@@ -708,6 +756,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
708 .cpu_setup = __setup_cpu_745x, 756 .cpu_setup = __setup_cpu_745x,
709 .oprofile_cpu_type = "ppc/7450", 757 .oprofile_cpu_type = "ppc/7450",
710 .oprofile_type = PPC_OPROFILE_G4, 758 .oprofile_type = PPC_OPROFILE_G4,
759 .machine_check = machine_check_generic,
711 .platform = "ppc7450", 760 .platform = "ppc7450",
712 }, 761 },
713 { /* 7450 2.3 and newer */ 762 { /* 7450 2.3 and newer */
@@ -723,6 +772,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
723 .cpu_setup = __setup_cpu_745x, 772 .cpu_setup = __setup_cpu_745x,
724 .oprofile_cpu_type = "ppc/7450", 773 .oprofile_cpu_type = "ppc/7450",
725 .oprofile_type = PPC_OPROFILE_G4, 774 .oprofile_type = PPC_OPROFILE_G4,
775 .machine_check = machine_check_generic,
726 .platform = "ppc7450", 776 .platform = "ppc7450",
727 }, 777 },
728 { /* 7455 rev 1.x */ 778 { /* 7455 rev 1.x */
@@ -738,6 +788,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
738 .cpu_setup = __setup_cpu_745x, 788 .cpu_setup = __setup_cpu_745x,
739 .oprofile_cpu_type = "ppc/7450", 789 .oprofile_cpu_type = "ppc/7450",
740 .oprofile_type = PPC_OPROFILE_G4, 790 .oprofile_type = PPC_OPROFILE_G4,
791 .machine_check = machine_check_generic,
741 .platform = "ppc7450", 792 .platform = "ppc7450",
742 }, 793 },
743 { /* 7455 rev 2.0 */ 794 { /* 7455 rev 2.0 */
@@ -753,6 +804,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
753 .cpu_setup = __setup_cpu_745x, 804 .cpu_setup = __setup_cpu_745x,
754 .oprofile_cpu_type = "ppc/7450", 805 .oprofile_cpu_type = "ppc/7450",
755 .oprofile_type = PPC_OPROFILE_G4, 806 .oprofile_type = PPC_OPROFILE_G4,
807 .machine_check = machine_check_generic,
756 .platform = "ppc7450", 808 .platform = "ppc7450",
757 }, 809 },
758 { /* 7455 others */ 810 { /* 7455 others */
@@ -768,6 +820,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
768 .cpu_setup = __setup_cpu_745x, 820 .cpu_setup = __setup_cpu_745x,
769 .oprofile_cpu_type = "ppc/7450", 821 .oprofile_cpu_type = "ppc/7450",
770 .oprofile_type = PPC_OPROFILE_G4, 822 .oprofile_type = PPC_OPROFILE_G4,
823 .machine_check = machine_check_generic,
771 .platform = "ppc7450", 824 .platform = "ppc7450",
772 }, 825 },
773 { /* 7447/7457 Rev 1.0 */ 826 { /* 7447/7457 Rev 1.0 */
@@ -783,6 +836,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
783 .cpu_setup = __setup_cpu_745x, 836 .cpu_setup = __setup_cpu_745x,
784 .oprofile_cpu_type = "ppc/7450", 837 .oprofile_cpu_type = "ppc/7450",
785 .oprofile_type = PPC_OPROFILE_G4, 838 .oprofile_type = PPC_OPROFILE_G4,
839 .machine_check = machine_check_generic,
786 .platform = "ppc7450", 840 .platform = "ppc7450",
787 }, 841 },
788 { /* 7447/7457 Rev 1.1 */ 842 { /* 7447/7457 Rev 1.1 */
@@ -798,6 +852,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
798 .cpu_setup = __setup_cpu_745x, 852 .cpu_setup = __setup_cpu_745x,
799 .oprofile_cpu_type = "ppc/7450", 853 .oprofile_cpu_type = "ppc/7450",
800 .oprofile_type = PPC_OPROFILE_G4, 854 .oprofile_type = PPC_OPROFILE_G4,
855 .machine_check = machine_check_generic,
801 .platform = "ppc7450", 856 .platform = "ppc7450",
802 }, 857 },
803 { /* 7447/7457 Rev 1.2 and later */ 858 { /* 7447/7457 Rev 1.2 and later */
@@ -812,6 +867,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
812 .cpu_setup = __setup_cpu_745x, 867 .cpu_setup = __setup_cpu_745x,
813 .oprofile_cpu_type = "ppc/7450", 868 .oprofile_cpu_type = "ppc/7450",
814 .oprofile_type = PPC_OPROFILE_G4, 869 .oprofile_type = PPC_OPROFILE_G4,
870 .machine_check = machine_check_generic,
815 .platform = "ppc7450", 871 .platform = "ppc7450",
816 }, 872 },
817 { /* 7447A */ 873 { /* 7447A */
@@ -827,6 +883,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
827 .cpu_setup = __setup_cpu_745x, 883 .cpu_setup = __setup_cpu_745x,
828 .oprofile_cpu_type = "ppc/7450", 884 .oprofile_cpu_type = "ppc/7450",
829 .oprofile_type = PPC_OPROFILE_G4, 885 .oprofile_type = PPC_OPROFILE_G4,
886 .machine_check = machine_check_generic,
830 .platform = "ppc7450", 887 .platform = "ppc7450",
831 }, 888 },
832 { /* 7448 */ 889 { /* 7448 */
@@ -842,6 +899,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
842 .cpu_setup = __setup_cpu_745x, 899 .cpu_setup = __setup_cpu_745x,
843 .oprofile_cpu_type = "ppc/7450", 900 .oprofile_cpu_type = "ppc/7450",
844 .oprofile_type = PPC_OPROFILE_G4, 901 .oprofile_type = PPC_OPROFILE_G4,
902 .machine_check = machine_check_generic,
845 .platform = "ppc7450", 903 .platform = "ppc7450",
846 }, 904 },
847 { /* 82xx (8240, 8245, 8260 are all 603e cores) */ 905 { /* 82xx (8240, 8245, 8260 are all 603e cores) */
@@ -853,6 +911,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
853 .icache_bsize = 32, 911 .icache_bsize = 32,
854 .dcache_bsize = 32, 912 .dcache_bsize = 32,
855 .cpu_setup = __setup_cpu_603, 913 .cpu_setup = __setup_cpu_603,
914 .machine_check = machine_check_generic,
856 .platform = "ppc603", 915 .platform = "ppc603",
857 }, 916 },
858 { /* All G2_LE (603e core, plus some) have the same pvr */ 917 { /* All G2_LE (603e core, plus some) have the same pvr */
@@ -864,6 +923,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
864 .icache_bsize = 32, 923 .icache_bsize = 32,
865 .dcache_bsize = 32, 924 .dcache_bsize = 32,
866 .cpu_setup = __setup_cpu_603, 925 .cpu_setup = __setup_cpu_603,
926 .machine_check = machine_check_generic,
867 .platform = "ppc603", 927 .platform = "ppc603",
868 }, 928 },
869 { /* e300c1 (a 603e core, plus some) on 83xx */ 929 { /* e300c1 (a 603e core, plus some) on 83xx */
@@ -875,6 +935,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
875 .icache_bsize = 32, 935 .icache_bsize = 32,
876 .dcache_bsize = 32, 936 .dcache_bsize = 32,
877 .cpu_setup = __setup_cpu_603, 937 .cpu_setup = __setup_cpu_603,
938 .machine_check = machine_check_generic,
878 .platform = "ppc603", 939 .platform = "ppc603",
879 }, 940 },
880 { /* e300c2 (an e300c1 core, plus some, minus FPU) on 83xx */ 941 { /* e300c2 (an e300c1 core, plus some, minus FPU) on 83xx */
@@ -886,9 +947,10 @@ static struct cpu_spec __initdata cpu_specs[] = {
886 .icache_bsize = 32, 947 .icache_bsize = 32,
887 .dcache_bsize = 32, 948 .dcache_bsize = 32,
888 .cpu_setup = __setup_cpu_603, 949 .cpu_setup = __setup_cpu_603,
950 .machine_check = machine_check_generic,
889 .platform = "ppc603", 951 .platform = "ppc603",
890 }, 952 },
891 { /* e300c3 on 83xx */ 953 { /* e300c3 (e300c1, plus one IU, half cache size) on 83xx */
892 .pvr_mask = 0x7fff0000, 954 .pvr_mask = 0x7fff0000,
893 .pvr_value = 0x00850000, 955 .pvr_value = 0x00850000,
894 .cpu_name = "e300c3", 956 .cpu_name = "e300c3",
@@ -899,6 +961,18 @@ static struct cpu_spec __initdata cpu_specs[] = {
899 .cpu_setup = __setup_cpu_603, 961 .cpu_setup = __setup_cpu_603,
900 .platform = "ppc603", 962 .platform = "ppc603",
901 }, 963 },
964 { /* e300c4 (e300c1, plus one IU) */
965 .pvr_mask = 0x7fff0000,
966 .pvr_value = 0x00860000,
967 .cpu_name = "e300c4",
968 .cpu_features = CPU_FTRS_E300,
969 .cpu_user_features = COMMON_USER,
970 .icache_bsize = 32,
971 .dcache_bsize = 32,
972 .cpu_setup = __setup_cpu_603,
973 .machine_check = machine_check_generic,
974 .platform = "ppc603",
975 },
902 { /* default match, we assume split I/D cache & TB (non-601)... */ 976 { /* default match, we assume split I/D cache & TB (non-601)... */
903 .pvr_mask = 0x00000000, 977 .pvr_mask = 0x00000000,
904 .pvr_value = 0x00000000, 978 .pvr_value = 0x00000000,
@@ -907,6 +981,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
907 .cpu_user_features = COMMON_USER, 981 .cpu_user_features = COMMON_USER,
908 .icache_bsize = 32, 982 .icache_bsize = 32,
909 .dcache_bsize = 32, 983 .dcache_bsize = 32,
984 .machine_check = machine_check_generic,
910 .platform = "ppc603", 985 .platform = "ppc603",
911 }, 986 },
912#endif /* CLASSIC_PPC */ 987#endif /* CLASSIC_PPC */
@@ -933,6 +1008,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
933 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 1008 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
934 .icache_bsize = 16, 1009 .icache_bsize = 16,
935 .dcache_bsize = 16, 1010 .dcache_bsize = 16,
1011 .machine_check = machine_check_4xx,
936 .platform = "ppc403", 1012 .platform = "ppc403",
937 }, 1013 },
938 { /* 403GCX */ 1014 { /* 403GCX */
@@ -944,6 +1020,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
944 PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB, 1020 PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
945 .icache_bsize = 16, 1021 .icache_bsize = 16,
946 .dcache_bsize = 16, 1022 .dcache_bsize = 16,
1023 .machine_check = machine_check_4xx,
947 .platform = "ppc403", 1024 .platform = "ppc403",
948 }, 1025 },
949 { /* 403G ?? */ 1026 { /* 403G ?? */
@@ -954,6 +1031,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
954 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 1031 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
955 .icache_bsize = 16, 1032 .icache_bsize = 16,
956 .dcache_bsize = 16, 1033 .dcache_bsize = 16,
1034 .machine_check = machine_check_4xx,
957 .platform = "ppc403", 1035 .platform = "ppc403",
958 }, 1036 },
959 { /* 405GP */ 1037 { /* 405GP */
@@ -965,6 +1043,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
965 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1043 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
966 .icache_bsize = 32, 1044 .icache_bsize = 32,
967 .dcache_bsize = 32, 1045 .dcache_bsize = 32,
1046 .machine_check = machine_check_4xx,
968 .platform = "ppc405", 1047 .platform = "ppc405",
969 }, 1048 },
970 { /* STB 03xxx */ 1049 { /* STB 03xxx */
@@ -976,6 +1055,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
976 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1055 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
977 .icache_bsize = 32, 1056 .icache_bsize = 32,
978 .dcache_bsize = 32, 1057 .dcache_bsize = 32,
1058 .machine_check = machine_check_4xx,
979 .platform = "ppc405", 1059 .platform = "ppc405",
980 }, 1060 },
981 { /* STB 04xxx */ 1061 { /* STB 04xxx */
@@ -987,6 +1067,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
987 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1067 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
988 .icache_bsize = 32, 1068 .icache_bsize = 32,
989 .dcache_bsize = 32, 1069 .dcache_bsize = 32,
1070 .machine_check = machine_check_4xx,
990 .platform = "ppc405", 1071 .platform = "ppc405",
991 }, 1072 },
992 { /* NP405L */ 1073 { /* NP405L */
@@ -998,6 +1079,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
998 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1079 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
999 .icache_bsize = 32, 1080 .icache_bsize = 32,
1000 .dcache_bsize = 32, 1081 .dcache_bsize = 32,
1082 .machine_check = machine_check_4xx,
1001 .platform = "ppc405", 1083 .platform = "ppc405",
1002 }, 1084 },
1003 { /* NP4GS3 */ 1085 { /* NP4GS3 */
@@ -1009,6 +1091,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1009 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1091 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1010 .icache_bsize = 32, 1092 .icache_bsize = 32,
1011 .dcache_bsize = 32, 1093 .dcache_bsize = 32,
1094 .machine_check = machine_check_4xx,
1012 .platform = "ppc405", 1095 .platform = "ppc405",
1013 }, 1096 },
1014 { /* NP405H */ 1097 { /* NP405H */
@@ -1020,6 +1103,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1020 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1103 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1021 .icache_bsize = 32, 1104 .icache_bsize = 32,
1022 .dcache_bsize = 32, 1105 .dcache_bsize = 32,
1106 .machine_check = machine_check_4xx,
1023 .platform = "ppc405", 1107 .platform = "ppc405",
1024 }, 1108 },
1025 { /* 405GPr */ 1109 { /* 405GPr */
@@ -1031,6 +1115,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1031 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1115 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1032 .icache_bsize = 32, 1116 .icache_bsize = 32,
1033 .dcache_bsize = 32, 1117 .dcache_bsize = 32,
1118 .machine_check = machine_check_4xx,
1034 .platform = "ppc405", 1119 .platform = "ppc405",
1035 }, 1120 },
1036 { /* STBx25xx */ 1121 { /* STBx25xx */
@@ -1042,6 +1127,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1042 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1127 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1043 .icache_bsize = 32, 1128 .icache_bsize = 32,
1044 .dcache_bsize = 32, 1129 .dcache_bsize = 32,
1130 .machine_check = machine_check_4xx,
1045 .platform = "ppc405", 1131 .platform = "ppc405",
1046 }, 1132 },
1047 { /* 405LP */ 1133 { /* 405LP */
@@ -1052,6 +1138,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1052 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 1138 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
1053 .icache_bsize = 32, 1139 .icache_bsize = 32,
1054 .dcache_bsize = 32, 1140 .dcache_bsize = 32,
1141 .machine_check = machine_check_4xx,
1055 .platform = "ppc405", 1142 .platform = "ppc405",
1056 }, 1143 },
1057 { /* Xilinx Virtex-II Pro */ 1144 { /* Xilinx Virtex-II Pro */
@@ -1063,6 +1150,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1063 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1150 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1064 .icache_bsize = 32, 1151 .icache_bsize = 32,
1065 .dcache_bsize = 32, 1152 .dcache_bsize = 32,
1153 .machine_check = machine_check_4xx,
1066 .platform = "ppc405", 1154 .platform = "ppc405",
1067 }, 1155 },
1068 { /* Xilinx Virtex-4 FX */ 1156 { /* Xilinx Virtex-4 FX */
@@ -1074,6 +1162,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1074 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1162 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1075 .icache_bsize = 32, 1163 .icache_bsize = 32,
1076 .dcache_bsize = 32, 1164 .dcache_bsize = 32,
1165 .machine_check = machine_check_4xx,
1077 .platform = "ppc405", 1166 .platform = "ppc405",
1078 }, 1167 },
1079 { /* 405EP */ 1168 { /* 405EP */
@@ -1085,17 +1174,31 @@ static struct cpu_spec __initdata cpu_specs[] = {
1085 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1174 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1086 .icache_bsize = 32, 1175 .icache_bsize = 32,
1087 .dcache_bsize = 32, 1176 .dcache_bsize = 32,
1177 .machine_check = machine_check_4xx,
1088 .platform = "ppc405", 1178 .platform = "ppc405",
1089 }, 1179 },
1090 { /* 405EX */ 1180 { /* 405EX */
1091 .pvr_mask = 0xffff0000, 1181 .pvr_mask = 0xffff0004,
1092 .pvr_value = 0x12910000, 1182 .pvr_value = 0x12910004,
1093 .cpu_name = "405EX", 1183 .cpu_name = "405EX",
1094 .cpu_features = CPU_FTRS_40X, 1184 .cpu_features = CPU_FTRS_40X,
1095 .cpu_user_features = PPC_FEATURE_32 | 1185 .cpu_user_features = PPC_FEATURE_32 |
1096 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1186 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1097 .icache_bsize = 32, 1187 .icache_bsize = 32,
1098 .dcache_bsize = 32, 1188 .dcache_bsize = 32,
1189 .machine_check = machine_check_4xx,
1190 .platform = "ppc405",
1191 },
1192 { /* 405EXr */
1193 .pvr_mask = 0xffff0004,
1194 .pvr_value = 0x12910000,
1195 .cpu_name = "405EXr",
1196 .cpu_features = CPU_FTRS_40X,
1197 .cpu_user_features = PPC_FEATURE_32 |
1198 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1199 .icache_bsize = 32,
1200 .dcache_bsize = 32,
1201 .machine_check = machine_check_4xx,
1099 .platform = "ppc405", 1202 .platform = "ppc405",
1100 }, 1203 },
1101 1204
@@ -1109,6 +1212,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1109 .cpu_user_features = COMMON_USER_BOOKE, 1212 .cpu_user_features = COMMON_USER_BOOKE,
1110 .icache_bsize = 32, 1213 .icache_bsize = 32,
1111 .dcache_bsize = 32, 1214 .dcache_bsize = 32,
1215 .machine_check = machine_check_4xx,
1112 .platform = "ppc440", 1216 .platform = "ppc440",
1113 }, 1217 },
1114 { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */ 1218 { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */
@@ -1120,6 +1224,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1120 .icache_bsize = 32, 1224 .icache_bsize = 32,
1121 .dcache_bsize = 32, 1225 .dcache_bsize = 32,
1122 .cpu_setup = __setup_cpu_440ep, 1226 .cpu_setup = __setup_cpu_440ep,
1227 .machine_check = machine_check_4xx,
1123 .platform = "ppc440", 1228 .platform = "ppc440",
1124 }, 1229 },
1125 { 1230 {
@@ -1130,6 +1235,19 @@ static struct cpu_spec __initdata cpu_specs[] = {
1130 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1235 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1131 .icache_bsize = 32, 1236 .icache_bsize = 32,
1132 .dcache_bsize = 32, 1237 .dcache_bsize = 32,
1238 .machine_check = machine_check_4xx,
1239 .platform = "ppc440",
1240 },
1241 { /* Matches both physical and logical PVR for 440EP (logical pvr = pvr | 0x8) */
1242 .pvr_mask = 0xf0000ff7,
1243 .pvr_value = 0x400008d4,
1244 .cpu_name = "440EP Rev. C",
1245 .cpu_features = CPU_FTRS_44X,
1246 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1247 .icache_bsize = 32,
1248 .dcache_bsize = 32,
1249 .cpu_setup = __setup_cpu_440ep,
1250 .machine_check = machine_check_4xx,
1133 .platform = "ppc440", 1251 .platform = "ppc440",
1134 }, 1252 },
1135 { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */ 1253 { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */
@@ -1141,6 +1259,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1141 .icache_bsize = 32, 1259 .icache_bsize = 32,
1142 .dcache_bsize = 32, 1260 .dcache_bsize = 32,
1143 .cpu_setup = __setup_cpu_440ep, 1261 .cpu_setup = __setup_cpu_440ep,
1262 .machine_check = machine_check_4xx,
1144 .platform = "ppc440", 1263 .platform = "ppc440",
1145 }, 1264 },
1146 { /* 440GRX */ 1265 { /* 440GRX */
@@ -1152,6 +1271,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1152 .icache_bsize = 32, 1271 .icache_bsize = 32,
1153 .dcache_bsize = 32, 1272 .dcache_bsize = 32,
1154 .cpu_setup = __setup_cpu_440grx, 1273 .cpu_setup = __setup_cpu_440grx,
1274 .machine_check = machine_check_440A,
1155 .platform = "ppc440", 1275 .platform = "ppc440",
1156 }, 1276 },
1157 { /* Use logical PVR for 440EPx (logical pvr = pvr | 0x8) */ 1277 { /* Use logical PVR for 440EPx (logical pvr = pvr | 0x8) */
@@ -1163,6 +1283,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1163 .icache_bsize = 32, 1283 .icache_bsize = 32,
1164 .dcache_bsize = 32, 1284 .dcache_bsize = 32,
1165 .cpu_setup = __setup_cpu_440epx, 1285 .cpu_setup = __setup_cpu_440epx,
1286 .machine_check = machine_check_440A,
1166 .platform = "ppc440", 1287 .platform = "ppc440",
1167 }, 1288 },
1168 { /* 440GP Rev. B */ 1289 { /* 440GP Rev. B */
@@ -1173,6 +1294,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1173 .cpu_user_features = COMMON_USER_BOOKE, 1294 .cpu_user_features = COMMON_USER_BOOKE,
1174 .icache_bsize = 32, 1295 .icache_bsize = 32,
1175 .dcache_bsize = 32, 1296 .dcache_bsize = 32,
1297 .machine_check = machine_check_4xx,
1176 .platform = "ppc440gp", 1298 .platform = "ppc440gp",
1177 }, 1299 },
1178 { /* 440GP Rev. C */ 1300 { /* 440GP Rev. C */
@@ -1183,6 +1305,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1183 .cpu_user_features = COMMON_USER_BOOKE, 1305 .cpu_user_features = COMMON_USER_BOOKE,
1184 .icache_bsize = 32, 1306 .icache_bsize = 32,
1185 .dcache_bsize = 32, 1307 .dcache_bsize = 32,
1308 .machine_check = machine_check_4xx,
1186 .platform = "ppc440gp", 1309 .platform = "ppc440gp",
1187 }, 1310 },
1188 { /* 440GX Rev. A */ 1311 { /* 440GX Rev. A */
@@ -1193,6 +1316,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
1193 .cpu_user_features = COMMON_USER_BOOKE, 1316 .cpu_user_features = COMMON_USER_BOOKE,
1194 .icache_bsize = 32, 1317 .icache_bsize = 32,
1195 .dcache_bsize = 32, 1318 .dcache_bsize = 32,
1319 .cpu_setup = __setup_cpu_440gx,
1320 .machine_check = machine_check_440A,
1196 .platform = "ppc440", 1321 .platform = "ppc440",
1197 }, 1322 },
1198 { /* 440GX Rev. B */ 1323 { /* 440GX Rev. B */
@@ -1203,6 +1328,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
1203 .cpu_user_features = COMMON_USER_BOOKE, 1328 .cpu_user_features = COMMON_USER_BOOKE,
1204 .icache_bsize = 32, 1329 .icache_bsize = 32,
1205 .dcache_bsize = 32, 1330 .dcache_bsize = 32,
1331 .cpu_setup = __setup_cpu_440gx,
1332 .machine_check = machine_check_440A,
1206 .platform = "ppc440", 1333 .platform = "ppc440",
1207 }, 1334 },
1208 { /* 440GX Rev. C */ 1335 { /* 440GX Rev. C */
@@ -1213,6 +1340,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
1213 .cpu_user_features = COMMON_USER_BOOKE, 1340 .cpu_user_features = COMMON_USER_BOOKE,
1214 .icache_bsize = 32, 1341 .icache_bsize = 32,
1215 .dcache_bsize = 32, 1342 .dcache_bsize = 32,
1343 .cpu_setup = __setup_cpu_440gx,
1344 .machine_check = machine_check_440A,
1216 .platform = "ppc440", 1345 .platform = "ppc440",
1217 }, 1346 },
1218 { /* 440GX Rev. F */ 1347 { /* 440GX Rev. F */
@@ -1223,6 +1352,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
1223 .cpu_user_features = COMMON_USER_BOOKE, 1352 .cpu_user_features = COMMON_USER_BOOKE,
1224 .icache_bsize = 32, 1353 .icache_bsize = 32,
1225 .dcache_bsize = 32, 1354 .dcache_bsize = 32,
1355 .cpu_setup = __setup_cpu_440gx,
1356 .machine_check = machine_check_440A,
1226 .platform = "ppc440", 1357 .platform = "ppc440",
1227 }, 1358 },
1228 { /* 440SP Rev. A */ 1359 { /* 440SP Rev. A */
@@ -1233,6 +1364,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1233 .cpu_user_features = COMMON_USER_BOOKE, 1364 .cpu_user_features = COMMON_USER_BOOKE,
1234 .icache_bsize = 32, 1365 .icache_bsize = 32,
1235 .dcache_bsize = 32, 1366 .dcache_bsize = 32,
1367 .machine_check = machine_check_4xx,
1236 .platform = "ppc440", 1368 .platform = "ppc440",
1237 }, 1369 },
1238 { /* 440SPe Rev. A */ 1370 { /* 440SPe Rev. A */
@@ -1243,6 +1375,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
1243 .cpu_user_features = COMMON_USER_BOOKE, 1375 .cpu_user_features = COMMON_USER_BOOKE,
1244 .icache_bsize = 32, 1376 .icache_bsize = 32,
1245 .dcache_bsize = 32, 1377 .dcache_bsize = 32,
1378 .cpu_setup = __setup_cpu_440spe,
1379 .machine_check = machine_check_440A,
1246 .platform = "ppc440", 1380 .platform = "ppc440",
1247 }, 1381 },
1248 { /* 440SPe Rev. B */ 1382 { /* 440SPe Rev. B */
@@ -1253,10 +1387,13 @@ static struct cpu_spec __initdata cpu_specs[] = {
1253 .cpu_user_features = COMMON_USER_BOOKE, 1387 .cpu_user_features = COMMON_USER_BOOKE,
1254 .icache_bsize = 32, 1388 .icache_bsize = 32,
1255 .dcache_bsize = 32, 1389 .dcache_bsize = 32,
1390 .cpu_setup = __setup_cpu_440spe,
1391 .machine_check = machine_check_440A,
1256 .platform = "ppc440", 1392 .platform = "ppc440",
1257 }, 1393 },
1258#endif /* CONFIG_44x */ 1394#endif /* CONFIG_44x */
1259#ifdef CONFIG_FSL_BOOKE 1395#ifdef CONFIG_FSL_BOOKE
1396#ifdef CONFIG_E200
1260 { /* e200z5 */ 1397 { /* e200z5 */
1261 .pvr_mask = 0xfff00000, 1398 .pvr_mask = 0xfff00000,
1262 .pvr_value = 0x81000000, 1399 .pvr_value = 0x81000000,
@@ -1267,6 +1404,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1267 PPC_FEATURE_HAS_EFP_SINGLE | 1404 PPC_FEATURE_HAS_EFP_SINGLE |
1268 PPC_FEATURE_UNIFIED_CACHE, 1405 PPC_FEATURE_UNIFIED_CACHE,
1269 .dcache_bsize = 32, 1406 .dcache_bsize = 32,
1407 .machine_check = machine_check_e200,
1270 .platform = "ppc5554", 1408 .platform = "ppc5554",
1271 }, 1409 },
1272 { /* e200z6 */ 1410 { /* e200z6 */
@@ -1280,8 +1418,10 @@ static struct cpu_spec __initdata cpu_specs[] = {
1280 PPC_FEATURE_HAS_EFP_SINGLE_COMP | 1418 PPC_FEATURE_HAS_EFP_SINGLE_COMP |
1281 PPC_FEATURE_UNIFIED_CACHE, 1419 PPC_FEATURE_UNIFIED_CACHE,
1282 .dcache_bsize = 32, 1420 .dcache_bsize = 32,
1421 .machine_check = machine_check_e200,
1283 .platform = "ppc5554", 1422 .platform = "ppc5554",
1284 }, 1423 },
1424#elif defined(CONFIG_E500)
1285 { /* e500 */ 1425 { /* e500 */
1286 .pvr_mask = 0xffff0000, 1426 .pvr_mask = 0xffff0000,
1287 .pvr_value = 0x80200000, 1427 .pvr_value = 0x80200000,
@@ -1296,6 +1436,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1296 .num_pmcs = 4, 1436 .num_pmcs = 4,
1297 .oprofile_cpu_type = "ppc/e500", 1437 .oprofile_cpu_type = "ppc/e500",
1298 .oprofile_type = PPC_OPROFILE_BOOKE, 1438 .oprofile_type = PPC_OPROFILE_BOOKE,
1439 .machine_check = machine_check_e500,
1299 .platform = "ppc8540", 1440 .platform = "ppc8540",
1300 }, 1441 },
1301 { /* e500v2 */ 1442 { /* e500v2 */
@@ -1313,9 +1454,11 @@ static struct cpu_spec __initdata cpu_specs[] = {
1313 .num_pmcs = 4, 1454 .num_pmcs = 4,
1314 .oprofile_cpu_type = "ppc/e500", 1455 .oprofile_cpu_type = "ppc/e500",
1315 .oprofile_type = PPC_OPROFILE_BOOKE, 1456 .oprofile_type = PPC_OPROFILE_BOOKE,
1457 .machine_check = machine_check_e500,
1316 .platform = "ppc8548", 1458 .platform = "ppc8548",
1317 }, 1459 },
1318#endif 1460#endif
1461#endif
1319#if !CLASSIC_PPC 1462#if !CLASSIC_PPC
1320 { /* default match */ 1463 { /* default match */
1321 .pvr_mask = 0x00000000, 1464 .pvr_mask = 0x00000000,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 77c749a13378..571132ed12c1 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -32,6 +32,8 @@
32#include <asm/lmb.h> 32#include <asm/lmb.h>
33#include <asm/firmware.h> 33#include <asm/firmware.h>
34#include <asm/smp.h> 34#include <asm/smp.h>
35#include <asm/system.h>
36#include <asm/setjmp.h>
35 37
36#ifdef DEBUG 38#ifdef DEBUG
37#include <asm/udbg.h> 39#include <asm/udbg.h>
@@ -45,6 +47,11 @@ int crashing_cpu = -1;
45static cpumask_t cpus_in_crash = CPU_MASK_NONE; 47static cpumask_t cpus_in_crash = CPU_MASK_NONE;
46cpumask_t cpus_in_sr = CPU_MASK_NONE; 48cpumask_t cpus_in_sr = CPU_MASK_NONE;
47 49
50#define CRASH_HANDLER_MAX 1
51/* NULL terminated list of shutdown handles */
52static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1];
53static DEFINE_SPINLOCK(crash_handlers_lock);
54
48#ifdef CONFIG_SMP 55#ifdef CONFIG_SMP
49static atomic_t enter_on_soft_reset = ATOMIC_INIT(0); 56static atomic_t enter_on_soft_reset = ATOMIC_INIT(0);
50 57
@@ -285,9 +292,72 @@ static inline void crash_kexec_stop_spus(void)
285} 292}
286#endif /* CONFIG_SPU_BASE */ 293#endif /* CONFIG_SPU_BASE */
287 294
295/*
296 * Register a function to be called on shutdown. Only use this if you
297 * can't reset your device in the second kernel.
298 */
299int crash_shutdown_register(crash_shutdown_t handler)
300{
301 unsigned int i, rc;
302
303 spin_lock(&crash_handlers_lock);
304 for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
305 if (!crash_shutdown_handles[i]) {
306 /* Insert handle at first empty entry */
307 crash_shutdown_handles[i] = handler;
308 rc = 0;
309 break;
310 }
311
312 if (i == CRASH_HANDLER_MAX) {
313 printk(KERN_ERR "Crash shutdown handles full, "
314 "not registered.\n");
315 rc = 1;
316 }
317
318 spin_unlock(&crash_handlers_lock);
319 return rc;
320}
321EXPORT_SYMBOL(crash_shutdown_register);
322
323int crash_shutdown_unregister(crash_shutdown_t handler)
324{
325 unsigned int i, rc;
326
327 spin_lock(&crash_handlers_lock);
328 for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
329 if (crash_shutdown_handles[i] == handler)
330 break;
331
332 if (i == CRASH_HANDLER_MAX) {
333 printk(KERN_ERR "Crash shutdown handle not found\n");
334 rc = 1;
335 } else {
336 /* Shift handles down */
337 for (; crash_shutdown_handles[i]; i++)
338 crash_shutdown_handles[i] =
339 crash_shutdown_handles[i+1];
340 rc = 0;
341 }
342
343 spin_unlock(&crash_handlers_lock);
344 return rc;
345}
346EXPORT_SYMBOL(crash_shutdown_unregister);
347
348static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
349
350static int handle_fault(struct pt_regs *regs)
351{
352 longjmp(crash_shutdown_buf, 1);
353 return 0;
354}
355
288void default_machine_crash_shutdown(struct pt_regs *regs) 356void default_machine_crash_shutdown(struct pt_regs *regs)
289{ 357{
290 unsigned int irq; 358 unsigned int i;
359 int (*old_handler)(struct pt_regs *regs);
360
291 361
292 /* 362 /*
293 * This function is only called after the system 363 * This function is only called after the system
@@ -301,15 +371,36 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
301 */ 371 */
302 hard_irq_disable(); 372 hard_irq_disable();
303 373
304 for_each_irq(irq) { 374 for_each_irq(i) {
305 struct irq_desc *desc = irq_desc + irq; 375 struct irq_desc *desc = irq_desc + i;
306 376
307 if (desc->status & IRQ_INPROGRESS) 377 if (desc->status & IRQ_INPROGRESS)
308 desc->chip->eoi(irq); 378 desc->chip->eoi(i);
309 379
310 if (!(desc->status & IRQ_DISABLED)) 380 if (!(desc->status & IRQ_DISABLED))
311 desc->chip->disable(irq); 381 desc->chip->disable(i);
382 }
383
384 /*
385 * Call registered shutdown routines savely. Swap out
386 * __debugger_fault_handler, and replace on exit.
387 */
388 old_handler = __debugger_fault_handler;
389 __debugger_fault_handler = handle_fault;
390 for (i = 0; crash_shutdown_handles[i]; i++) {
391 if (setjmp(crash_shutdown_buf) == 0) {
392 /*
393 * Insert syncs and delay to ensure
394 * instructions in the dangerous region don't
395 * leak away from this protected region.
396 */
397 asm volatile("sync; isync");
398 /* dangerous region */
399 crash_shutdown_handles[i]();
400 asm volatile("sync; isync");
401 }
312 } 402 }
403 __debugger_fault_handler = old_handler;
313 404
314 /* 405 /*
315 * Make a note of crashing cpu. Will be used in machine_kexec 406 * Make a note of crashing cpu. Will be used in machine_kexec
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 14206e3f0819..84239076a5b8 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma_64.c
@@ -112,10 +112,16 @@ EXPORT_SYMBOL(dma_iommu_ops);
112/* 112/*
113 * Generic direct DMA implementation 113 * Generic direct DMA implementation
114 * 114 *
115 * This implementation supports a global offset that can be applied if 115 * This implementation supports a per-device offset that can be applied if
116 * the address at which memory is visible to devices is not 0. 116 * the address at which memory is visible to devices is not 0. Platform code
117 * can set archdata.dma_data to an unsigned long holding the offset. By
118 * default the offset is zero.
117 */ 119 */
118unsigned long dma_direct_offset; 120
121static unsigned long get_dma_direct_offset(struct device *dev)
122{
123 return (unsigned long)dev->archdata.dma_data;
124}
119 125
120static void *dma_direct_alloc_coherent(struct device *dev, size_t size, 126static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
121 dma_addr_t *dma_handle, gfp_t flag) 127 dma_addr_t *dma_handle, gfp_t flag)
@@ -124,13 +130,12 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
124 void *ret; 130 void *ret;
125 int node = dev->archdata.numa_node; 131 int node = dev->archdata.numa_node;
126 132
127 /* TODO: Maybe use the numa node here too ? */
128 page = alloc_pages_node(node, flag, get_order(size)); 133 page = alloc_pages_node(node, flag, get_order(size));
129 if (page == NULL) 134 if (page == NULL)
130 return NULL; 135 return NULL;
131 ret = page_address(page); 136 ret = page_address(page);
132 memset(ret, 0, size); 137 memset(ret, 0, size);
133 *dma_handle = virt_to_abs(ret) | dma_direct_offset; 138 *dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);
134 139
135 return ret; 140 return ret;
136} 141}
@@ -145,7 +150,7 @@ static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
145 size_t size, 150 size_t size,
146 enum dma_data_direction direction) 151 enum dma_data_direction direction)
147{ 152{
148 return virt_to_abs(ptr) | dma_direct_offset; 153 return virt_to_abs(ptr) + get_dma_direct_offset(dev);
149} 154}
150 155
151static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr, 156static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
@@ -161,7 +166,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
161 int i; 166 int i;
162 167
163 for_each_sg(sgl, sg, nents, i) { 168 for_each_sg(sgl, sg, nents, i) {
164 sg->dma_address = sg_phys(sg) | dma_direct_offset; 169 sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
165 sg->dma_length = sg->length; 170 sg->dma_length = sg->length;
166 } 171 }
167 172
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 56aba84c1f6e..ad071a146a8d 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -289,11 +289,8 @@ interrupt_base:
289 CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception) 289 CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
290 290
291 /* Machine Check Interrupt */ 291 /* Machine Check Interrupt */
292#ifdef CONFIG_440A
293 MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
294#else
295 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception) 292 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
296#endif 293 MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)
297 294
298 /* Data Storage Interrupt */ 295 /* Data Storage Interrupt */
299 START_EXCEPTION(DataStorage) 296 START_EXCEPTION(DataStorage)
@@ -674,6 +671,15 @@ finish_tlb_load:
674 */ 671 */
675 672
676/* 673/*
674 * Adjust the machine check IVOR on 440A cores
675 */
676_GLOBAL(__fixup_440A_mcheck)
677 li r3,MachineCheckA@l
678 mtspr SPRN_IVOR1,r3
679 sync
680 blr
681
682/*
677 * extern void giveup_altivec(struct task_struct *prev) 683 * extern void giveup_altivec(struct task_struct *prev)
678 * 684 *
679 * The 44x core does not have an AltiVec unit. 685 * The 44x core does not have an AltiVec unit.
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index c34986835a4e..11b4f6d9ffce 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -903,6 +903,7 @@ handle_page_fault:
903 * the PTE insertion 903 * the PTE insertion
904 */ 904 */
90512: bl .save_nvgprs 90512: bl .save_nvgprs
906 mr r5,r3
906 addi r3,r1,STACK_FRAME_OVERHEAD 907 addi r3,r1,STACK_FRAME_OVERHEAD
907 ld r4,_DAR(r1) 908 ld r4,_DAR(r1)
908 bl .low_hash_fault 909 bl .low_hash_fault
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 8536e7676160..ba9393f8e77a 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -166,7 +166,7 @@ label:
166 mfspr r5,SPRN_ESR; \ 166 mfspr r5,SPRN_ESR; \
167 stw r5,_ESR(r11); \ 167 stw r5,_ESR(r11); \
168 addi r3,r1,STACK_FRAME_OVERHEAD; \ 168 addi r3,r1,STACK_FRAME_OVERHEAD; \
169 EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ 169 EXC_XFER_TEMPLATE(hdlr, n+4, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
170 NOCOPY, mcheck_transfer_to_handler, \ 170 NOCOPY, mcheck_transfer_to_handler, \
171 ret_from_mcheck_exc) 171 ret_from_mcheck_exc)
172 172
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 7aecb39a5a45..d9cc2c288d9e 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -73,8 +73,8 @@ _ENTRY(_start);
73/* We try to not make any assumptions about how the boot loader 73/* We try to not make any assumptions about how the boot loader
74 * setup or used the TLBs. We invalidate all mappings from the 74 * setup or used the TLBs. We invalidate all mappings from the
75 * boot loader and load a single entry in TLB1[0] to map the 75 * boot loader and load a single entry in TLB1[0] to map the
76 * first 16M of kernel memory. Any boot info passed from the 76 * first 64M of kernel memory. Any boot info passed from the
77 * bootloader needs to live in this first 16M. 77 * bootloader needs to live in this first 64M.
78 * 78 *
79 * Requirement on bootloader: 79 * Requirement on bootloader:
80 * - The page we're executing in needs to reside in TLB1 and 80 * - The page we're executing in needs to reside in TLB1 and
@@ -167,7 +167,7 @@ skpinv: addi r6,r6,1 /* Increment */
167 mtspr SPRN_MAS0,r7 167 mtspr SPRN_MAS0,r7
168 tlbre 168 tlbre
169 169
170 /* Just modify the entry ID and EPN for the temp mapping */ 170 /* Just modify the entry ID, EPN and RPN for the temp mapping */
171 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ 171 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
172 rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ 172 rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
173 mtspr SPRN_MAS0,r7 173 mtspr SPRN_MAS0,r7
@@ -177,9 +177,12 @@ skpinv: addi r6,r6,1 /* Increment */
177 ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l 177 ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
178 mtspr SPRN_MAS1,r6 178 mtspr SPRN_MAS1,r6
179 mfspr r6,SPRN_MAS2 179 mfspr r6,SPRN_MAS2
180 li r7,0 /* temp EPN = 0 */ 180 lis r7,PHYSICAL_START@h
181 rlwimi r7,r6,0,20,31 181 rlwimi r7,r6,0,20,31
182 mtspr SPRN_MAS2,r7 182 mtspr SPRN_MAS2,r7
183 mfspr r6,SPRN_MAS3
184 rlwimi r7,r6,0,20,31
185 mtspr SPRN_MAS3,r7
183 tlbwe 186 tlbwe
184 187
185 xori r6,r4,1 188 xori r6,r4,1
@@ -222,11 +225,11 @@ skpinv: addi r6,r6,1 /* Increment */
222 lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */ 225 lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
223 mtspr SPRN_MAS0,r6 226 mtspr SPRN_MAS0,r6
224 lis r6,(MAS1_VALID|MAS1_IPROT)@h 227 lis r6,(MAS1_VALID|MAS1_IPROT)@h
225 ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l 228 ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_64M))@l
226 mtspr SPRN_MAS1,r6 229 mtspr SPRN_MAS1,r6
227 li r7,0 230 li r7,0
228 lis r6,KERNELBASE@h 231 lis r6,PAGE_OFFSET@h
229 ori r6,r6,KERNELBASE@l 232 ori r6,r6,PAGE_OFFSET@l
230 rlwimi r6,r7,0,20,31 233 rlwimi r6,r7,0,20,31
231 mtspr SPRN_MAS2,r6 234 mtspr SPRN_MAS2,r6
232 li r7,(MAS3_SX|MAS3_SW|MAS3_SR) 235 li r7,(MAS3_SX|MAS3_SW|MAS3_SR)
@@ -234,6 +237,9 @@ skpinv: addi r6,r6,1 /* Increment */
234 tlbwe 237 tlbwe
235 238
236/* 7. Jump to KERNELBASE mapping */ 239/* 7. Jump to KERNELBASE mapping */
240 lis r6,KERNELBASE@h
241 ori r6,r6,KERNELBASE@l
242 rlwimi r6,r7,0,20,31
237 lis r7,MSR_KERNEL@h 243 lis r7,MSR_KERNEL@h
238 ori r7,r7,MSR_KERNEL@l 244 ori r7,r7,MSR_KERNEL@l
239 bl 1f /* Find our address */ 245 bl 1f /* Find our address */
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 72fd87156b24..2f50bb5d00f9 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -41,6 +41,7 @@
41#include <linux/kobject.h> 41#include <linux/kobject.h>
42#include <linux/dma-mapping.h> 42#include <linux/dma-mapping.h>
43#include <linux/interrupt.h> 43#include <linux/interrupt.h>
44#include <linux/of.h>
44#include <linux/of_platform.h> 45#include <linux/of_platform.h>
45#include <asm/ibmebus.h> 46#include <asm/ibmebus.h>
46#include <asm/abs_addr.h> 47#include <asm/abs_addr.h>
@@ -52,7 +53,7 @@ static struct device ibmebus_bus_device = { /* fake "parent" device */
52struct bus_type ibmebus_bus_type; 53struct bus_type ibmebus_bus_type;
53 54
54/* These devices will automatically be added to the bus during init */ 55/* These devices will automatically be added to the bus during init */
55static struct of_device_id builtin_matches[] = { 56static struct of_device_id __initdata builtin_matches[] = {
56 { .compatible = "IBM,lhca" }, 57 { .compatible = "IBM,lhca" },
57 { .compatible = "IBM,lhea" }, 58 { .compatible = "IBM,lhea" },
58 {}, 59 {},
@@ -171,7 +172,7 @@ static int ibmebus_create_devices(const struct of_device_id *matches)
171 172
172 root = of_find_node_by_path("/"); 173 root = of_find_node_by_path("/");
173 174
174 for (child = NULL; (child = of_get_next_child(root, child)); ) { 175 for_each_child_of_node(root, child) {
175 if (!of_match_node(matches, child)) 176 if (!of_match_node(matches, child))
176 continue; 177 continue;
177 178
@@ -197,16 +198,13 @@ int ibmebus_register_driver(struct of_platform_driver *drv)
197 /* If the driver uses devices that ibmebus doesn't know, add them */ 198 /* If the driver uses devices that ibmebus doesn't know, add them */
198 ibmebus_create_devices(drv->match_table); 199 ibmebus_create_devices(drv->match_table);
199 200
200 drv->driver.name = drv->name; 201 return of_register_driver(drv, &ibmebus_bus_type);
201 drv->driver.bus = &ibmebus_bus_type;
202
203 return driver_register(&drv->driver);
204} 202}
205EXPORT_SYMBOL(ibmebus_register_driver); 203EXPORT_SYMBOL(ibmebus_register_driver);
206 204
207void ibmebus_unregister_driver(struct of_platform_driver *drv) 205void ibmebus_unregister_driver(struct of_platform_driver *drv)
208{ 206{
209 driver_unregister(&drv->driver); 207 of_unregister_driver(drv);
210} 208}
211EXPORT_SYMBOL(ibmebus_unregister_driver); 209EXPORT_SYMBOL(ibmebus_unregister_driver);
212 210
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 79a85d656871..a3c406aca664 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -532,16 +532,14 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
532 return tbl; 532 return tbl;
533} 533}
534 534
535void iommu_free_table(struct device_node *dn) 535void iommu_free_table(struct iommu_table *tbl, const char *node_name)
536{ 536{
537 struct pci_dn *pdn = dn->data;
538 struct iommu_table *tbl = pdn->iommu_table;
539 unsigned long bitmap_sz, i; 537 unsigned long bitmap_sz, i;
540 unsigned int order; 538 unsigned int order;
541 539
542 if (!tbl || !tbl->it_map) { 540 if (!tbl || !tbl->it_map) {
543 printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__, 541 printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
544 dn->full_name); 542 node_name);
545 return; 543 return;
546 } 544 }
547 545
@@ -550,7 +548,7 @@ void iommu_free_table(struct device_node *dn)
550 for (i = 0; i < (tbl->it_size/64); i++) { 548 for (i = 0; i < (tbl->it_size/64); i++) {
551 if (tbl->it_map[i] != 0) { 549 if (tbl->it_map[i] != 0) {
552 printk(KERN_WARNING "%s: Unexpected TCEs for %s\n", 550 printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
553 __FUNCTION__, dn->full_name); 551 __FUNCTION__, node_name);
554 break; 552 break;
555 } 553 }
556 } 554 }
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
index f0f49d1be3d5..ee172aa42aa7 100644
--- a/arch/powerpc/kernel/isa-bridge.c
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -108,7 +108,7 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
108 if (size > 0x10000) 108 if (size > 0x10000)
109 size = 0x10000; 109 size = 0x10000;
110 110
111 printk(KERN_ERR "no ISA IO ranges or unexpected isa range," 111 printk(KERN_ERR "no ISA IO ranges or unexpected isa range, "
112 "mapping 64k\n"); 112 "mapping 64k\n");
113 113
114 __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE, 114 __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
@@ -116,7 +116,7 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
116 return; 116 return;
117 117
118inval_range: 118inval_range:
119 printk(KERN_ERR "no ISA IO ranges or unexpected isa range," 119 printk(KERN_ERR "no ISA IO ranges or unexpected isa range, "
120 "mapping 64k\n"); 120 "mapping 64k\n");
121 __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE, 121 __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
122 0x10000, _PAGE_NO_CACHE|_PAGE_GUARDED); 122 0x10000, _PAGE_NO_CACHE|_PAGE_GUARDED);
@@ -145,7 +145,7 @@ void __init isa_bridge_find_early(struct pci_controller *hose)
145 for_each_node_by_type(np, "isa") { 145 for_each_node_by_type(np, "isa") {
146 /* Look for our hose being a parent */ 146 /* Look for our hose being a parent */
147 for (parent = of_get_parent(np); parent;) { 147 for (parent = of_get_parent(np); parent;) {
148 if (parent == hose->arch_data) { 148 if (parent == hose->dn) {
149 of_node_put(parent); 149 of_node_put(parent);
150 break; 150 break;
151 } 151 }
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 4ed58875ee17..76b862bd1fe9 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -4,6 +4,7 @@
4#include <linux/serial_core.h> 4#include <linux/serial_core.h>
5#include <linux/console.h> 5#include <linux/console.h>
6#include <linux/pci.h> 6#include <linux/pci.h>
7#include <linux/of_device.h>
7#include <asm/io.h> 8#include <asm/io.h>
8#include <asm/mmu.h> 9#include <asm/mmu.h>
9#include <asm/prom.h> 10#include <asm/prom.h>
@@ -31,6 +32,15 @@ static struct legacy_serial_info {
31 int irq_check_parent; 32 int irq_check_parent;
32 phys_addr_t taddr; 33 phys_addr_t taddr;
33} legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS]; 34} legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
35
36static struct __initdata of_device_id parents[] = {
37 {.type = "soc",},
38 {.type = "tsi-bridge",},
39 {.type = "opb", .compatible = "ibm,opb",},
40 {.compatible = "simple-bus",},
41 {.compatible = "wrs,epld-localbus",},
42};
43
34static unsigned int legacy_serial_count; 44static unsigned int legacy_serial_count;
35static int legacy_serial_console = -1; 45static int legacy_serial_console = -1;
36 46
@@ -306,19 +316,21 @@ void __init find_legacy_serial_ports(void)
306 DBG(" no linux,stdout-path !\n"); 316 DBG(" no linux,stdout-path !\n");
307 } 317 }
308 318
309 /* First fill our array with SOC ports */ 319 /* Iterate over all the 16550 ports, looking for known parents */
310 for (np = NULL; (np = of_find_compatible_node(np, "serial", "ns16550")) != NULL;) { 320 for_each_compatible_node(np, "serial", "ns16550") {
311 struct device_node *soc = of_get_parent(np); 321 struct device_node *parent = of_get_parent(np);
312 if (soc && !strcmp(soc->type, "soc")) { 322 if (!parent)
323 continue;
324 if (of_match_node(parents, parent) != NULL) {
313 index = add_legacy_soc_port(np, np); 325 index = add_legacy_soc_port(np, np);
314 if (index >= 0 && np == stdout) 326 if (index >= 0 && np == stdout)
315 legacy_serial_console = index; 327 legacy_serial_console = index;
316 } 328 }
317 of_node_put(soc); 329 of_node_put(parent);
318 } 330 }
319 331
320 /* First fill our array with ISA ports */ 332 /* Next, fill our array with ISA ports */
321 for (np = NULL; (np = of_find_node_by_type(np, "serial"));) { 333 for_each_node_by_type(np, "serial") {
322 struct device_node *isa = of_get_parent(np); 334 struct device_node *isa = of_get_parent(np);
323 if (isa && !strcmp(isa->name, "isa")) { 335 if (isa && !strcmp(isa->name, "isa")) {
324 index = add_legacy_isa_port(np, isa); 336 index = add_legacy_isa_port(np, isa);
@@ -328,29 +340,6 @@ void __init find_legacy_serial_ports(void)
328 of_node_put(isa); 340 of_node_put(isa);
329 } 341 }
330 342
331 /* First fill our array with tsi-bridge ports */
332 for (np = NULL; (np = of_find_compatible_node(np, "serial", "ns16550")) != NULL;) {
333 struct device_node *tsi = of_get_parent(np);
334 if (tsi && !strcmp(tsi->type, "tsi-bridge")) {
335 index = add_legacy_soc_port(np, np);
336 if (index >= 0 && np == stdout)
337 legacy_serial_console = index;
338 }
339 of_node_put(tsi);
340 }
341
342 /* First fill our array with opb bus ports */
343 for (np = NULL; (np = of_find_compatible_node(np, "serial", "ns16550")) != NULL;) {
344 struct device_node *opb = of_get_parent(np);
345 if (opb && (!strcmp(opb->type, "opb") ||
346 of_device_is_compatible(opb, "ibm,opb"))) {
347 index = add_legacy_soc_port(np, np);
348 if (index >= 0 && np == stdout)
349 legacy_serial_console = index;
350 }
351 of_node_put(opb);
352 }
353
354#ifdef CONFIG_PCI 343#ifdef CONFIG_PCI
355 /* Next, try to locate PCI ports */ 344 /* Next, try to locate PCI ports */
356 for (np = NULL; (np = of_find_all_nodes(np));) { 345 for (np = NULL; (np = of_find_all_nodes(np));) {
@@ -474,7 +463,7 @@ static int __init serial_dev_init(void)
474 463
475 /* 464 /*
476 * Before we register the platfrom serial devices, we need 465 * Before we register the platfrom serial devices, we need
477 * to fixup their interrutps and their IO ports. 466 * to fixup their interrupts and their IO ports.
478 */ 467 */
479 DBG("Fixing serial ports interrupts and IO ports ...\n"); 468 DBG("Fixing serial ports interrupts and IO ports ...\n");
480 469
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index ff781b2eddec..dcb89a88df46 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -41,7 +41,6 @@
41/* #define LPARCFG_DEBUG */ 41/* #define LPARCFG_DEBUG */
42 42
43static struct proc_dir_entry *proc_ppc64_lparcfg; 43static struct proc_dir_entry *proc_ppc64_lparcfg;
44#define LPARCFG_BUFF_SIZE 4096
45 44
46/* 45/*
47 * Track sum of all purrs across all processors. This is used to further 46 * Track sum of all purrs across all processors. This is used to further
@@ -595,13 +594,6 @@ int __init lparcfg_init(void)
595 ent = create_proc_entry("ppc64/lparcfg", mode, NULL); 594 ent = create_proc_entry("ppc64/lparcfg", mode, NULL);
596 if (ent) { 595 if (ent) {
597 ent->proc_fops = &lparcfg_fops; 596 ent->proc_fops = &lparcfg_fops;
598 ent->data = kmalloc(LPARCFG_BUFF_SIZE, GFP_KERNEL);
599 if (!ent->data) {
600 printk(KERN_ERR
601 "Failed to allocate buffer for lparcfg\n");
602 remove_proc_entry("lparcfg", ent->parent);
603 return -ENOMEM;
604 }
605 } else { 597 } else {
606 printk(KERN_ERR "Failed to create ppc64/lparcfg\n"); 598 printk(KERN_ERR "Failed to create ppc64/lparcfg\n");
607 return -EIO; 599 return -EIO;
@@ -613,10 +605,8 @@ int __init lparcfg_init(void)
613 605
614void __exit lparcfg_cleanup(void) 606void __exit lparcfg_cleanup(void)
615{ 607{
616 if (proc_ppc64_lparcfg) { 608 if (proc_ppc64_lparcfg)
617 kfree(proc_ppc64_lparcfg->data);
618 remove_proc_entry("lparcfg", proc_ppc64_lparcfg->parent); 609 remove_proc_entry("lparcfg", proc_ppc64_lparcfg->parent);
619 }
620} 610}
621 611
622module_init(lparcfg_init); 612module_init(lparcfg_init);
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 330c9dc7db86..7b9160220698 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -8,12 +8,17 @@
8 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com) 8 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
9 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) 9 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
10 * 10 *
11 * setjmp/longjmp code by Paul Mackerras.
12 *
11 * This program is free software; you can redistribute it and/or 13 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 14 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 15 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version. 16 * 2 of the License, or (at your option) any later version.
15 */ 17 */
16#include <asm/ppc_asm.h> 18#include <asm/ppc_asm.h>
19#include <asm/unistd.h>
20#include <asm/asm-compat.h>
21#include <asm/asm-offsets.h>
17 22
18 .text 23 .text
19 24
@@ -43,3 +48,71 @@ _GLOBAL(add_reloc_offset)
43 add r3,r3,r5 48 add r3,r3,r5
44 mtlr r0 49 mtlr r0
45 blr 50 blr
51
52_GLOBAL(kernel_execve)
53 li r0,__NR_execve
54 sc
55 bnslr
56 neg r3,r3
57 blr
58
59_GLOBAL(setjmp)
60 mflr r0
61 PPC_STL r0,0(r3)
62 PPC_STL r1,SZL(r3)
63 PPC_STL r2,2*SZL(r3)
64 mfcr r0
65 PPC_STL r0,3*SZL(r3)
66 PPC_STL r13,4*SZL(r3)
67 PPC_STL r14,5*SZL(r3)
68 PPC_STL r15,6*SZL(r3)
69 PPC_STL r16,7*SZL(r3)
70 PPC_STL r17,8*SZL(r3)
71 PPC_STL r18,9*SZL(r3)
72 PPC_STL r19,10*SZL(r3)
73 PPC_STL r20,11*SZL(r3)
74 PPC_STL r21,12*SZL(r3)
75 PPC_STL r22,13*SZL(r3)
76 PPC_STL r23,14*SZL(r3)
77 PPC_STL r24,15*SZL(r3)
78 PPC_STL r25,16*SZL(r3)
79 PPC_STL r26,17*SZL(r3)
80 PPC_STL r27,18*SZL(r3)
81 PPC_STL r28,19*SZL(r3)
82 PPC_STL r29,20*SZL(r3)
83 PPC_STL r30,21*SZL(r3)
84 PPC_STL r31,22*SZL(r3)
85 li r3,0
86 blr
87
88_GLOBAL(longjmp)
89 PPC_LCMPI r4,0
90 bne 1f
91 li r4,1
921: PPC_LL r13,4*SZL(r3)
93 PPC_LL r14,5*SZL(r3)
94 PPC_LL r15,6*SZL(r3)
95 PPC_LL r16,7*SZL(r3)
96 PPC_LL r17,8*SZL(r3)
97 PPC_LL r18,9*SZL(r3)
98 PPC_LL r19,10*SZL(r3)
99 PPC_LL r20,11*SZL(r3)
100 PPC_LL r21,12*SZL(r3)
101 PPC_LL r22,13*SZL(r3)
102 PPC_LL r23,14*SZL(r3)
103 PPC_LL r24,15*SZL(r3)
104 PPC_LL r25,16*SZL(r3)
105 PPC_LL r26,17*SZL(r3)
106 PPC_LL r27,18*SZL(r3)
107 PPC_LL r28,19*SZL(r3)
108 PPC_LL r29,20*SZL(r3)
109 PPC_LL r30,21*SZL(r3)
110 PPC_LL r31,22*SZL(r3)
111 PPC_LL r0,3*SZL(r3)
112 mtcrf 0x38,r0
113 PPC_LL r0,0(r3)
114 PPC_LL r1,SZL(r3)
115 PPC_LL r2,2*SZL(r3)
116 mtlr r0
117 mr r3,r4
118 blr
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 8b642ab26d37..5c2e253ddfb1 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -206,6 +206,45 @@ _GLOBAL(_nmask_and_or_msr)
206 isync 206 isync
207 blr /* Done */ 207 blr /* Done */
208 208
209#ifdef CONFIG_40x
210
211/*
212 * Do an IO access in real mode
213 */
214_GLOBAL(real_readb)
215 mfmsr r7
216 ori r0,r7,MSR_DR
217 xori r0,r0,MSR_DR
218 sync
219 mtmsr r0
220 sync
221 isync
222 lbz r3,0(r3)
223 sync
224 mtmsr r7
225 sync
226 isync
227 blr
228
229 /*
230 * Do an IO access in real mode
231 */
232_GLOBAL(real_writeb)
233 mfmsr r7
234 ori r0,r7,MSR_DR
235 xori r0,r0,MSR_DR
236 sync
237 mtmsr r0
238 sync
239 isync
240 stb r3,0(r4)
241 sync
242 mtmsr r7
243 sync
244 isync
245 blr
246
247#endif /* CONFIG_40x */
209 248
210/* 249/*
211 * Flush MMU TLB 250 * Flush MMU TLB
@@ -236,12 +275,6 @@ _GLOBAL(_tlbia)
236 /* Invalidate all entries in TLB1 */ 275 /* Invalidate all entries in TLB1 */
237 li r3, 0x0c 276 li r3, 0x0c
238 tlbivax 0,3 277 tlbivax 0,3
239 /* Invalidate all entries in TLB2 */
240 li r3, 0x14
241 tlbivax 0,3
242 /* Invalidate all entries in TLB3 */
243 li r3, 0x1c
244 tlbivax 0,3
245 msync 278 msync
246#ifdef CONFIG_SMP 279#ifdef CONFIG_SMP
247 tlbsync 280 tlbsync
@@ -336,12 +369,8 @@ _GLOBAL(_tlbie)
336#elif defined(CONFIG_FSL_BOOKE) 369#elif defined(CONFIG_FSL_BOOKE)
337 rlwinm r4, r3, 0, 0, 19 370 rlwinm r4, r3, 0, 0, 19
338 ori r5, r4, 0x08 /* TLBSEL = 1 */ 371 ori r5, r4, 0x08 /* TLBSEL = 1 */
339 ori r6, r4, 0x10 /* TLBSEL = 2 */
340 ori r7, r4, 0x18 /* TLBSEL = 3 */
341 tlbivax 0, r4 372 tlbivax 0, r4
342 tlbivax 0, r5 373 tlbivax 0, r5
343 tlbivax 0, r6
344 tlbivax 0, r7
345 msync 374 msync
346#if defined(CONFIG_SMP) 375#if defined(CONFIG_SMP)
347 tlbsync 376 tlbsync
@@ -793,13 +822,6 @@ _GLOBAL(kernel_thread)
793 addi r1,r1,16 822 addi r1,r1,16
794 blr 823 blr
795 824
796_GLOBAL(kernel_execve)
797 li r0,__NR_execve
798 sc
799 bnslr
800 neg r3,r3
801 blr
802
803/* 825/*
804 * This routine is just here to keep GCC happy - sigh... 826 * This routine is just here to keep GCC happy - sigh...
805 */ 827 */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index bbb3ba54c51c..a3c491e88a72 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -518,13 +518,6 @@ _GLOBAL(giveup_altivec)
518 518
519#endif /* CONFIG_ALTIVEC */ 519#endif /* CONFIG_ALTIVEC */
520 520
521_GLOBAL(kernel_execve)
522 li r0,__NR_execve
523 sc
524 bnslr
525 neg r3,r3
526 blr
527
528/* kexec_wait(phys_cpu) 521/* kexec_wait(phys_cpu)
529 * 522 *
530 * wait for the flag to change, indicating this kernel is going away but 523 * wait for the flag to change, indicating this kernel is going away but
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 07a89a398639..eab313858315 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -24,6 +24,7 @@
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/cache.h> 25#include <linux/cache.h>
26#include <linux/bug.h> 26#include <linux/bug.h>
27#include <linux/sort.h>
27 28
28#include "setup.h" 29#include "setup.h"
29 30
@@ -54,22 +55,60 @@ void module_free(struct module *mod, void *module_region)
54 addend) */ 55 addend) */
55static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num) 56static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
56{ 57{
57 unsigned int i, j, ret = 0; 58 unsigned int i, r_info, r_addend, _count_relocs;
58 59
59 /* Sure, this is order(n^2), but it's usually short, and not 60 _count_relocs = 0;
60 time critical */ 61 r_info = 0;
61 for (i = 0; i < num; i++) { 62 r_addend = 0;
62 for (j = 0; j < i; j++) { 63 for (i = 0; i < num; i++)
63 /* If this addend appeared before, it's 64 /* Only count 24-bit relocs, others don't need stubs */
64 already been counted */ 65 if (ELF32_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
65 if (ELF32_R_SYM(rela[i].r_info) 66 (r_info != ELF32_R_SYM(rela[i].r_info) ||
66 == ELF32_R_SYM(rela[j].r_info) 67 r_addend != rela[i].r_addend)) {
67 && rela[i].r_addend == rela[j].r_addend) 68 _count_relocs++;
68 break; 69 r_info = ELF32_R_SYM(rela[i].r_info);
70 r_addend = rela[i].r_addend;
69 } 71 }
70 if (j == i) ret++; 72
73 return _count_relocs;
74}
75
76static int relacmp(const void *_x, const void *_y)
77{
78 const Elf32_Rela *x, *y;
79
80 y = (Elf32_Rela *)_x;
81 x = (Elf32_Rela *)_y;
82
83 /* Compare the entire r_info (as opposed to ELF32_R_SYM(r_info) only) to
84 * make the comparison cheaper/faster. It won't affect the sorting or
85 * the counting algorithms' performance
86 */
87 if (x->r_info < y->r_info)
88 return -1;
89 else if (x->r_info > y->r_info)
90 return 1;
91 else if (x->r_addend < y->r_addend)
92 return -1;
93 else if (x->r_addend > y->r_addend)
94 return 1;
95 else
96 return 0;
97}
98
99static void relaswap(void *_x, void *_y, int size)
100{
101 uint32_t *x, *y, tmp;
102 int i;
103
104 y = (uint32_t *)_x;
105 x = (uint32_t *)_y;
106
107 for (i = 0; i < sizeof(Elf32_Rela) / sizeof(uint32_t); i++) {
108 tmp = x[i];
109 x[i] = y[i];
110 y[i] = tmp;
71 } 111 }
72 return ret;
73} 112}
74 113
75/* Get the potential trampolines size required of the init and 114/* Get the potential trampolines size required of the init and
@@ -100,6 +139,16 @@ static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
100 DEBUGP("Ptr: %p. Number: %u\n", 139 DEBUGP("Ptr: %p. Number: %u\n",
101 (void *)hdr + sechdrs[i].sh_offset, 140 (void *)hdr + sechdrs[i].sh_offset,
102 sechdrs[i].sh_size / sizeof(Elf32_Rela)); 141 sechdrs[i].sh_size / sizeof(Elf32_Rela));
142
143 /* Sort the relocation information based on a symbol and
144 * addend key. This is a stable O(n*log n) complexity
145 * alogrithm but it will reduce the complexity of
146 * count_relocs() to linear complexity O(n)
147 */
148 sort((void *)hdr + sechdrs[i].sh_offset,
149 sechdrs[i].sh_size / sizeof(Elf32_Rela),
150 sizeof(Elf32_Rela), relacmp, relaswap);
151
103 ret += count_relocs((void *)hdr 152 ret += count_relocs((void *)hdr
104 + sechdrs[i].sh_offset, 153 + sechdrs[i].sh_offset,
105 sechdrs[i].sh_size 154 sechdrs[i].sh_size
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 75c7c4f19280..3a82b02b784b 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -24,6 +24,7 @@
24#include <asm/module.h> 24#include <asm/module.h>
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26#include <asm/firmware.h> 26#include <asm/firmware.h>
27#include <linux/sort.h>
27 28
28#include "setup.h" 29#include "setup.h"
29 30
@@ -81,25 +82,23 @@ static struct ppc64_stub_entry ppc64_stub =
81 different addend) */ 82 different addend) */
82static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num) 83static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
83{ 84{
84 unsigned int i, j, ret = 0; 85 unsigned int i, r_info, r_addend, _count_relocs;
85 86
86 /* FIXME: Only count external ones --RR */ 87 /* FIXME: Only count external ones --RR */
87 /* Sure, this is order(n^2), but it's usually short, and not 88 _count_relocs = 0;
88 time critical */ 89 r_info = 0;
89 for (i = 0; i < num; i++) { 90 r_addend = 0;
91 for (i = 0; i < num; i++)
90 /* Only count 24-bit relocs, others don't need stubs */ 92 /* Only count 24-bit relocs, others don't need stubs */
91 if (ELF64_R_TYPE(rela[i].r_info) != R_PPC_REL24) 93 if (ELF64_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
92 continue; 94 (r_info != ELF64_R_SYM(rela[i].r_info) ||
93 for (j = 0; j < i; j++) { 95 r_addend != rela[i].r_addend)) {
94 /* If this addend appeared before, it's 96 _count_relocs++;
95 already been counted */ 97 r_info = ELF64_R_SYM(rela[i].r_info);
96 if (rela[i].r_info == rela[j].r_info 98 r_addend = rela[i].r_addend;
97 && rela[i].r_addend == rela[j].r_addend)
98 break;
99 } 99 }
100 if (j == i) ret++; 100
101 } 101 return _count_relocs;
102 return ret;
103} 102}
104 103
105void *module_alloc(unsigned long size) 104void *module_alloc(unsigned long size)
@@ -118,6 +117,44 @@ void module_free(struct module *mod, void *module_region)
118 table entries. */ 117 table entries. */
119} 118}
120 119
120static int relacmp(const void *_x, const void *_y)
121{
122 const Elf64_Rela *x, *y;
123
124 y = (Elf64_Rela *)_x;
125 x = (Elf64_Rela *)_y;
126
127 /* Compare the entire r_info (as opposed to ELF64_R_SYM(r_info) only) to
128 * make the comparison cheaper/faster. It won't affect the sorting or
129 * the counting algorithms' performance
130 */
131 if (x->r_info < y->r_info)
132 return -1;
133 else if (x->r_info > y->r_info)
134 return 1;
135 else if (x->r_addend < y->r_addend)
136 return -1;
137 else if (x->r_addend > y->r_addend)
138 return 1;
139 else
140 return 0;
141}
142
143static void relaswap(void *_x, void *_y, int size)
144{
145 uint64_t *x, *y, tmp;
146 int i;
147
148 y = (uint64_t *)_x;
149 x = (uint64_t *)_y;
150
151 for (i = 0; i < sizeof(Elf64_Rela) / sizeof(uint64_t); i++) {
152 tmp = x[i];
153 x[i] = y[i];
154 y[i] = tmp;
155 }
156}
157
121/* Get size of potential trampolines required. */ 158/* Get size of potential trampolines required. */
122static unsigned long get_stubs_size(const Elf64_Ehdr *hdr, 159static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
123 const Elf64_Shdr *sechdrs) 160 const Elf64_Shdr *sechdrs)
@@ -133,6 +170,16 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
133 DEBUGP("Ptr: %p. Number: %lu\n", 170 DEBUGP("Ptr: %p. Number: %lu\n",
134 (void *)sechdrs[i].sh_addr, 171 (void *)sechdrs[i].sh_addr,
135 sechdrs[i].sh_size / sizeof(Elf64_Rela)); 172 sechdrs[i].sh_size / sizeof(Elf64_Rela));
173
174 /* Sort the relocation information based on a symbol and
175 * addend key. This is a stable O(n*log n) complexity
176 * alogrithm but it will reduce the complexity of
177 * count_relocs() to linear complexity O(n)
178 */
179 sort((void *)sechdrs[i].sh_addr,
180 sechdrs[i].sh_size / sizeof(Elf64_Rela),
181 sizeof(Elf64_Rela), relacmp, relaswap);
182
136 relocs += count_relocs((void *)sechdrs[i].sh_addr, 183 relocs += count_relocs((void *)sechdrs[i].sh_addr,
137 sechdrs[i].sh_size 184 sechdrs[i].sh_size
138 / sizeof(Elf64_Rela)); 185 / sizeof(Elf64_Rela));
@@ -343,7 +390,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
343 /* Simply set it */ 390 /* Simply set it */
344 *(u32 *)location = value; 391 *(u32 *)location = value;
345 break; 392 break;
346 393
347 case R_PPC64_ADDR64: 394 case R_PPC64_ADDR64:
348 /* Simply set it */ 395 /* Simply set it */
349 *(unsigned long *)location = value; 396 *(unsigned long *)location = value;
@@ -399,7 +446,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
399 } 446 }
400 447
401 /* Only replace bits 2 through 26 */ 448 /* Only replace bits 2 through 26 */
402 *(uint32_t *)location 449 *(uint32_t *)location
403 = (*(uint32_t *)location & ~0x03fffffc) 450 = (*(uint32_t *)location & ~0x03fffffc)
404 | (value & 0x03fffffc); 451 | (value & 0x03fffffc);
405 break; 452 break;
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index 3388ad619996..5748ddb47d9f 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -5,10 +5,10 @@
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/mod_devicetable.h> 6#include <linux/mod_devicetable.h>
7#include <linux/slab.h> 7#include <linux/slab.h>
8#include <linux/of_device.h>
8 9
9#include <asm/errno.h> 10#include <asm/errno.h>
10#include <asm/dcr.h> 11#include <asm/dcr.h>
11#include <asm/of_device.h>
12 12
13static void of_device_make_bus_id(struct of_device *dev) 13static void of_device_make_bus_id(struct of_device *dev)
14{ 14{
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index aeaa20268ce2..fb698d47082d 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -19,6 +19,7 @@
19#include <linux/mod_devicetable.h> 19#include <linux/mod_devicetable.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/of.h>
22#include <linux/of_device.h> 23#include <linux/of_device.h>
23#include <linux/of_platform.h> 24#include <linux/of_platform.h>
24 25
@@ -40,7 +41,7 @@
40 * a bus type in the list 41 * a bus type in the list
41 */ 42 */
42 43
43static struct of_device_id of_default_bus_ids[] = { 44static const struct of_device_id of_default_bus_ids[] = {
44 { .type = "soc", }, 45 { .type = "soc", },
45 { .compatible = "soc", }, 46 { .compatible = "soc", },
46 { .type = "spider", }, 47 { .type = "spider", },
@@ -64,26 +65,6 @@ static int __init of_bus_driver_init(void)
64 65
65postcore_initcall(of_bus_driver_init); 66postcore_initcall(of_bus_driver_init);
66 67
67int of_register_platform_driver(struct of_platform_driver *drv)
68{
69 /* initialize common driver fields */
70 if (!drv->driver.name)
71 drv->driver.name = drv->name;
72 if (!drv->driver.owner)
73 drv->driver.owner = drv->owner;
74 drv->driver.bus = &of_platform_bus_type;
75
76 /* register with core */
77 return driver_register(&drv->driver);
78}
79EXPORT_SYMBOL(of_register_platform_driver);
80
81void of_unregister_platform_driver(struct of_platform_driver *drv)
82{
83 driver_unregister(&drv->driver);
84}
85EXPORT_SYMBOL(of_unregister_platform_driver);
86
87struct of_device* of_platform_device_create(struct device_node *np, 68struct of_device* of_platform_device_create(struct device_node *np,
88 const char *bus_id, 69 const char *bus_id,
89 struct device *parent) 70 struct device *parent)
@@ -120,15 +101,15 @@ EXPORT_SYMBOL(of_platform_device_create);
120 * @matches: match table, NULL to use the default, OF_NO_DEEP_PROBE to 101 * @matches: match table, NULL to use the default, OF_NO_DEEP_PROBE to
121 * disallow recursive creation of child busses 102 * disallow recursive creation of child busses
122 */ 103 */
123static int of_platform_bus_create(struct device_node *bus, 104static int of_platform_bus_create(const struct device_node *bus,
124 struct of_device_id *matches, 105 const struct of_device_id *matches,
125 struct device *parent) 106 struct device *parent)
126{ 107{
127 struct device_node *child; 108 struct device_node *child;
128 struct of_device *dev; 109 struct of_device *dev;
129 int rc = 0; 110 int rc = 0;
130 111
131 for (child = NULL; (child = of_get_next_child(bus, child)); ) { 112 for_each_child_of_node(bus, child) {
132 pr_debug(" create child: %s\n", child->full_name); 113 pr_debug(" create child: %s\n", child->full_name);
133 dev = of_platform_device_create(child, NULL, parent); 114 dev = of_platform_device_create(child, NULL, parent);
134 if (dev == NULL) 115 if (dev == NULL)
@@ -157,7 +138,7 @@ static int of_platform_bus_create(struct device_node *bus,
157 */ 138 */
158 139
159int of_platform_bus_probe(struct device_node *root, 140int of_platform_bus_probe(struct device_node *root,
160 struct of_device_id *matches, 141 const struct of_device_id *matches,
161 struct device *parent) 142 struct device *parent)
162{ 143{
163 struct device_node *child; 144 struct device_node *child;
@@ -190,7 +171,7 @@ int of_platform_bus_probe(struct device_node *root,
190 rc = of_platform_bus_create(root, matches, &dev->dev); 171 rc = of_platform_bus_create(root, matches, &dev->dev);
191 goto bail; 172 goto bail;
192 } 173 }
193 for (child = NULL; (child = of_get_next_child(root, child)); ) { 174 for_each_child_of_node(root, child) {
194 if (!of_match_node(matches, child)) 175 if (!of_match_node(matches, child))
195 continue; 176 continue;
196 177
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 2ae3b6f778a3..980fe32895c0 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -48,32 +48,26 @@
48static DEFINE_SPINLOCK(hose_spinlock); 48static DEFINE_SPINLOCK(hose_spinlock);
49 49
50/* XXX kill that some day ... */ 50/* XXX kill that some day ... */
51int global_phb_number; /* Global phb counter */ 51static int global_phb_number; /* Global phb counter */
52 52
53extern struct list_head hose_list; 53/* ISA Memory physical address */
54resource_size_t isa_mem_base;
54 55
55/* 56/* Default PCI flags is 0 */
56 * pci_controller(phb) initialized common variables. 57unsigned int ppc_pci_flags;
57 */
58static void __devinit pci_setup_pci_controller(struct pci_controller *hose)
59{
60 memset(hose, 0, sizeof(struct pci_controller));
61
62 spin_lock(&hose_spinlock);
63 hose->global_number = global_phb_number++;
64 list_add_tail(&hose->list_node, &hose_list);
65 spin_unlock(&hose_spinlock);
66}
67 58
68struct pci_controller * pcibios_alloc_controller(struct device_node *dev) 59struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
69{ 60{
70 struct pci_controller *phb; 61 struct pci_controller *phb;
71 62
72 phb = alloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL); 63 phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
73 if (phb == NULL) 64 if (phb == NULL)
74 return NULL; 65 return NULL;
75 pci_setup_pci_controller(phb); 66 spin_lock(&hose_spinlock);
76 phb->arch_data = dev; 67 phb->global_number = global_phb_number++;
68 list_add_tail(&phb->list_node, &hose_list);
69 spin_unlock(&hose_spinlock);
70 phb->dn = dev;
77 phb->is_dynamic = mem_init_done; 71 phb->is_dynamic = mem_init_done;
78#ifdef CONFIG_PPC64 72#ifdef CONFIG_PPC64
79 if (dev) { 73 if (dev) {
@@ -126,15 +120,10 @@ int pcibios_vaddr_is_ioport(void __iomem *address)
126 */ 120 */
127int pci_domain_nr(struct pci_bus *bus) 121int pci_domain_nr(struct pci_bus *bus)
128{ 122{
129 if (firmware_has_feature(FW_FEATURE_ISERIES)) 123 struct pci_controller *hose = pci_bus_to_host(bus);
130 return 0;
131 else {
132 struct pci_controller *hose = pci_bus_to_host(bus);
133 124
134 return hose->global_number; 125 return hose->global_number;
135 }
136} 126}
137
138EXPORT_SYMBOL(pci_domain_nr); 127EXPORT_SYMBOL(pci_domain_nr);
139 128
140#ifdef CONFIG_PPC_OF 129#ifdef CONFIG_PPC_OF
@@ -153,7 +142,7 @@ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
153 while(node) { 142 while(node) {
154 struct pci_controller *hose, *tmp; 143 struct pci_controller *hose, *tmp;
155 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) 144 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
156 if (hose->arch_data == node) 145 if (hose->dn == node)
157 return hose; 146 return hose;
158 node = node->parent; 147 node = node->parent;
159 } 148 }
@@ -201,6 +190,20 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
201 struct of_irq oirq; 190 struct of_irq oirq;
202 unsigned int virq; 191 unsigned int virq;
203 192
193 /* The current device-tree that iSeries generates from the HV
194 * PCI informations doesn't contain proper interrupt routing,
195 * and all the fallback would do is print out crap, so we
196 * don't attempt to resolve the interrupts here at all, some
197 * iSeries specific fixup does it.
198 *
199 * In the long run, we will hopefully fix the generated device-tree
200 * instead.
201 */
202#ifdef CONFIG_PPC_ISERIES
203 if (firmware_has_feature(FW_FEATURE_ISERIES))
204 return -1;
205#endif
206
204 DBG("Try to map irq for %s...\n", pci_name(pci_dev)); 207 DBG("Try to map irq for %s...\n", pci_name(pci_dev));
205 208
206#ifdef DEBUG 209#ifdef DEBUG
@@ -222,10 +225,11 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
222 if (pin == 0) 225 if (pin == 0)
223 return -1; 226 return -1;
224 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) || 227 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
225 line == 0xff) { 228 line == 0xff || line == 0) {
226 return -1; 229 return -1;
227 } 230 }
228 DBG(" -> no map ! Using irq line %d from PCI config\n", line); 231 DBG(" -> no map ! Using line %d (pin %d) from PCI config\n",
232 line, pin);
229 233
230 virq = irq_create_mapping(NULL, line); 234 virq = irq_create_mapping(NULL, line);
231 if (virq != NO_IRQ) 235 if (virq != NO_IRQ)
@@ -475,3 +479,717 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
475 *start = rsrc->start - offset; 479 *start = rsrc->start - offset;
476 *end = rsrc->end - offset; 480 *end = rsrc->end - offset;
477} 481}
482
483/**
484 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
485 * @hose: newly allocated pci_controller to be setup
486 * @dev: device node of the host bridge
487 * @primary: set if primary bus (32 bits only, soon to be deprecated)
488 *
489 * This function will parse the "ranges" property of a PCI host bridge device
490 * node and setup the resource mapping of a pci controller based on its
491 * content.
492 *
493 * Life would be boring if it wasn't for a few issues that we have to deal
494 * with here:
495 *
496 * - We can only cope with one IO space range and up to 3 Memory space
497 * ranges. However, some machines (thanks Apple !) tend to split their
498 * space into lots of small contiguous ranges. So we have to coalesce.
499 *
500 * - We can only cope with all memory ranges having the same offset
501 * between CPU addresses and PCI addresses. Unfortunately, some bridges
502 * are setup for a large 1:1 mapping along with a small "window" which
503 * maps PCI address 0 to some arbitrary high address of the CPU space in
504 * order to give access to the ISA memory hole.
505 * The way out of here that I've chosen for now is to always set the
506 * offset based on the first resource found, then override it if we
507 * have a different offset and the previous was set by an ISA hole.
508 *
509 * - Some busses have IO space not starting at 0, which causes trouble with
510 * the way we do our IO resource renumbering. The code somewhat deals with
511 * it for 64 bits but I would expect problems on 32 bits.
512 *
513 * - Some 32 bits platforms such as 4xx can have physical space larger than
514 * 32 bits so we need to use 64 bits values for the parsing
515 */
void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
					    struct device_node *dev,
					    int primary)
{
	const u32 *ranges;
	int rlen;
	int pna = of_n_addr_cells(dev);	/* #address-cells on the parent side */
	int np = pna + 5;	/* cells per entry: 3 PCI addr + pna CPU addr + 2 size */
	int memno = 0, isa_hole = -1;
	u32 pci_space;
	unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
	unsigned long long isa_mb = 0;
	struct resource *res;

	printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
	       dev->full_name, primary ? "(primary)" : "");

	/* Get ranges property */
	ranges = of_get_property(dev, "ranges", &rlen);
	if (ranges == NULL)
		return;

	/* Parse it */
	while ((rlen -= np * 4) >= 0) {
		/* Read next ranges element */
		pci_space = ranges[0];
		pci_addr = of_read_number(ranges + 1, 2);
		cpu_addr = of_translate_address(dev, ranges + 3);
		size = of_read_number(ranges + pna + 3, 2);
		ranges += np;
		if (cpu_addr == OF_BAD_ADDR || size == 0)
			continue;

		/* Now consume following elements while they are contiguous
		 * (coalescing the many small adjacent ranges some firmwares
		 * emit -- see the comment block above this function).
		 */
		for (; rlen >= np * sizeof(u32);
		     ranges += np, rlen -= np * 4) {
			if (ranges[0] != pci_space)
				break;
			pci_next = of_read_number(ranges + 1, 2);
			cpu_next = of_translate_address(dev, ranges + 3);
			if (pci_next != pci_addr + size ||
			    cpu_next != cpu_addr + size)
				break;
			size += of_read_number(ranges + pna + 3, 2);
		}

		/* Act based on address space type */
		res = NULL;
		switch ((pci_space >> 24) & 0x3) {
		case 1:		/* PCI IO space */
			printk(KERN_INFO
			       " IO 0x%016llx..0x%016llx -> 0x%016llx\n",
			       cpu_addr, cpu_addr + size - 1, pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
#ifdef CONFIG_PPC32
			/* On 32 bits, limit I/O space to 16MB */
			if (size > 0x01000000)
				size = 0x01000000;

			/* 32 bits needs to map IOs here */
			hose->io_base_virt = ioremap(cpu_addr, size);

			/* Expect trouble if pci_addr is not 0 */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
#endif /* CONFIG_PPC32 */
			/* pci_io_size and io_base_phys always represent IO
			 * space starting at 0 so we factor in pci_addr
			 */
			hose->pci_io_size = pci_addr + size;
			hose->io_base_phys = cpu_addr - pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			res->flags = IORESOURCE_IO;
			res->start = pci_addr;
			break;
		case 2:		/* PCI Memory space */
			printk(KERN_INFO
			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
			       cpu_addr, cpu_addr + size - 1, pci_addr,
			       (pci_space & 0x40000000) ? "Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
			/* Handles ISA memory hole space here */
			if (pci_addr == 0) {
				isa_mb = cpu_addr;
				isa_hole = memno;
				if (primary || isa_mem_base == 0)
					isa_mem_base = cpu_addr;
			}

			/* We get the PCI/Mem offset from the first range or
			 * the, current one if the offset came from an ISA
			 * hole. If they don't match, bugger.
			 */
			if (memno == 0 ||
			    (isa_hole >= 0 && pci_addr != 0 &&
			     hose->pci_mem_offset == isa_mb))
				hose->pci_mem_offset = cpu_addr - pci_addr;
			else if (pci_addr != 0 &&
				 hose->pci_mem_offset != cpu_addr - pci_addr) {
				printk(KERN_INFO
				       " \\--> Skipped (offset mismatch) !\n");
				continue;
			}

			/* Build resource */
			res = &hose->mem_resources[memno++];
			res->flags = IORESOURCE_MEM;
			if (pci_space & 0x40000000)
				res->flags |= IORESOURCE_PREFETCH;
			res->start = cpu_addr;
			break;
		}
		/* Common tail: finish whichever resource the switch built */
		if (res != NULL) {
			res->name = dev->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
	}

	/* Out of paranoia, let's put the ISA hole last if any */
	if (isa_hole >= 0 && memno > 0 && isa_hole != (memno-1)) {
		struct resource tmp = hose->mem_resources[isa_hole];
		hose->mem_resources[isa_hole] = hose->mem_resources[memno-1];
		hose->mem_resources[memno-1] = tmp;
	}
}
659
660/* Decide whether to display the domain number in /proc */
661int pci_proc_domain(struct pci_bus *bus)
662{
663 struct pci_controller *hose = pci_bus_to_host(bus);
664#ifdef CONFIG_PPC64
665 return hose->buid != 0;
666#else
667 if (!(ppc_pci_flags & PPC_PCI_ENABLE_PROC_DOMAINS))
668 return 0;
669 if (ppc_pci_flags & PPC_PCI_COMPAT_DOMAIN_0)
670 return hose->global_number != 0;
671 return 1;
672#endif
673}
674
675void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
676 struct resource *res)
677{
678 resource_size_t offset = 0, mask = (resource_size_t)-1;
679 struct pci_controller *hose = pci_bus_to_host(dev->bus);
680
681 if (!hose)
682 return;
683 if (res->flags & IORESOURCE_IO) {
684 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
685 mask = 0xffffffffu;
686 } else if (res->flags & IORESOURCE_MEM)
687 offset = hose->pci_mem_offset;
688
689 region->start = (res->start - offset) & mask;
690 region->end = (res->end - offset) & mask;
691}
692EXPORT_SYMBOL(pcibios_resource_to_bus);
693
694void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
695 struct pci_bus_region *region)
696{
697 resource_size_t offset = 0, mask = (resource_size_t)-1;
698 struct pci_controller *hose = pci_bus_to_host(dev->bus);
699
700 if (!hose)
701 return;
702 if (res->flags & IORESOURCE_IO) {
703 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
704 mask = 0xffffffffu;
705 } else if (res->flags & IORESOURCE_MEM)
706 offset = hose->pci_mem_offset;
707 res->start = (region->start + offset) & mask;
708 res->end = (region->end + offset) & mask;
709}
710EXPORT_SYMBOL(pcibios_bus_to_resource);
711
712/* Fixup a bus resource into a linux resource */
713static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
714{
715 struct pci_controller *hose = pci_bus_to_host(dev->bus);
716 resource_size_t offset = 0, mask = (resource_size_t)-1;
717
718 if (res->flags & IORESOURCE_IO) {
719 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
720 mask = 0xffffffffu;
721 } else if (res->flags & IORESOURCE_MEM)
722 offset = hose->pci_mem_offset;
723
724 res->start = (res->start + offset) & mask;
725 res->end = (res->end + offset) & mask;
726
727 pr_debug("PCI:%s %016llx-%016llx\n",
728 pci_name(dev),
729 (unsigned long long)res->start,
730 (unsigned long long)res->end);
731}
732
733
734/* This header fixup will do the resource fixup for all devices as they are
735 * probed, but not for bridge ranges
736 */
737static void __devinit pcibios_fixup_resources(struct pci_dev *dev)
738{
739 struct pci_controller *hose = pci_bus_to_host(dev->bus);
740 int i;
741
742 if (!hose) {
743 printk(KERN_ERR "No host bridge for PCI dev %s !\n",
744 pci_name(dev));
745 return;
746 }
747 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
748 struct resource *res = dev->resource + i;
749 if (!res->flags)
750 continue;
751 if (res->end == 0xffffffff) {
752 pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] is unassigned\n",
753 pci_name(dev), i,
754 (unsigned long long)res->start,
755 (unsigned long long)res->end,
756 (unsigned int)res->flags);
757 res->end -= res->start;
758 res->start = 0;
759 res->flags |= IORESOURCE_UNSET;
760 continue;
761 }
762
763 pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] fixup...\n",
764 pci_name(dev), i,
765 (unsigned long long)res->start,\
766 (unsigned long long)res->end,
767 (unsigned int)res->flags);
768
769 fixup_resource(res, dev);
770 }
771
772 /* Call machine specific resource fixup */
773 if (ppc_md.pcibios_fixup_resources)
774 ppc_md.pcibios_fixup_resources(dev);
775}
776DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
777
778static void __devinit __pcibios_fixup_bus(struct pci_bus *bus)
779{
780 struct pci_controller *hose = pci_bus_to_host(bus);
781 struct pci_dev *dev = bus->self;
782
783 pr_debug("PCI: Fixup bus %d (%s)\n", bus->number, dev ? pci_name(dev) : "PHB");
784
785 /* Fixup PCI<->PCI bridges. Host bridges are handled separately, for
786 * now differently between 32 and 64 bits.
787 */
788 if (dev != NULL) {
789 struct resource *res;
790 int i;
791
792 for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
793 if ((res = bus->resource[i]) == NULL)
794 continue;
795 if (!res->flags)
796 continue;
797 if (i >= 3 && bus->self->transparent)
798 continue;
799 /* On PowerMac, Apple leaves bridge windows open over
800 * an inaccessible region of memory space (0...fffff)
801 * which is somewhat bogus, but that's what they think
802 * means disabled...
803 *
804 * We clear those to force them to be reallocated later
805 *
806 * We detect such regions by the fact that the base is
807 * equal to the pci_mem_offset of the host bridge and
808 * their size is smaller than 1M.
809 */
810 if (res->flags & IORESOURCE_MEM &&
811 res->start == hose->pci_mem_offset &&
812 res->end < 0x100000) {
813 printk(KERN_INFO
814 "PCI: Closing bogus Apple Firmware"
815 " region %d on bus 0x%02x\n",
816 i, bus->number);
817 res->flags = 0;
818 continue;
819 }
820
821 pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
822 pci_name(dev), i,
823 (unsigned long long)res->start,\
824 (unsigned long long)res->end,
825 (unsigned int)res->flags);
826
827 fixup_resource(res, dev);
828 }
829 }
830
831 /* Additional setup that is different between 32 and 64 bits for now */
832 pcibios_do_bus_setup(bus);
833
834 /* Platform specific bus fixups */
835 if (ppc_md.pcibios_fixup_bus)
836 ppc_md.pcibios_fixup_bus(bus);
837
838 /* Read default IRQs and fixup if necessary */
839 list_for_each_entry(dev, &bus->devices, bus_list) {
840 pci_read_irq_line(dev);
841 if (ppc_md.pci_irq_fixup)
842 ppc_md.pci_irq_fixup(dev);
843 }
844}
845
846void __devinit pcibios_fixup_bus(struct pci_bus *bus)
847{
848 /* When called from the generic PCI probe, read PCI<->PCI bridge
849 * bases before proceeding
850 */
851 if (bus->self != NULL)
852 pci_read_bridge_bases(bus);
853 __pcibios_fixup_bus(bus);
854}
855EXPORT_SYMBOL(pcibios_fixup_bus);
856
857/* When building a bus from the OF tree rather than probing, we need a
858 * slightly different version of the fixup which doesn't read the
859 * bridge bases using config space accesses
860 */
861void __devinit pcibios_fixup_of_probed_bus(struct pci_bus *bus)
862{
863 __pcibios_fixup_bus(bus);
864}
865
866static int skip_isa_ioresource_align(struct pci_dev *dev)
867{
868 if ((ppc_pci_flags & PPC_PCI_CAN_SKIP_ISA_ALIGN) &&
869 !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
870 return 1;
871 return 0;
872}
873
874/*
875 * We need to avoid collisions with `mirrored' VGA ports
876 * and other strange ISA hardware, so we always want the
877 * addresses to be allocated in the 0x000-0x0ff region
878 * modulo 0x400.
879 *
880 * Why? Because some silly external IO cards only decode
881 * the low 10 bits of the IO address. The 0x00-0xff region
882 * is reserved for motherboard devices that decode all 16
883 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
884 * but we want to try to avoid allocating at 0x2900-0x2bff
885 * which might have be mirrored at 0x0100-0x03ff..
886 */
887void pcibios_align_resource(void *data, struct resource *res,
888 resource_size_t size, resource_size_t align)
889{
890 struct pci_dev *dev = data;
891
892 if (res->flags & IORESOURCE_IO) {
893 resource_size_t start = res->start;
894
895 if (skip_isa_ioresource_align(dev))
896 return;
897 if (start & 0x300) {
898 start = (start + 0x3ff) & ~0x3ff;
899 res->start = start;
900 }
901 }
902}
903EXPORT_SYMBOL(pcibios_align_resource);
904
/*
 * Reparent resource children of pr that conflict with res
 * under res, and make res replace those children.
 */
static int __init reparent_resources(struct resource *parent,
				     struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	/* Walk the (address-ordered) child list, remembering the first
	 * entry that overlaps res.  Any overlapping entry must fit wholly
	 * inside res or we give up.
	 */
	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries? */
	/* Splice res into the sibling list in place of the overlapping run
	 * [*firstpp .. *pp) and hang that run below res as its children.
	 */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
		    p->name,
		    (unsigned long long)p->start,
		    (unsigned long long)p->end, res->name);
	}
	return 0;
}
941
942/*
943 * Handle resources of PCI devices. If the world were perfect, we could
944 * just allocate all the resource regions and do nothing more. It isn't.
945 * On the other hand, we cannot just re-allocate all devices, as it would
946 * require us to know lots of host bridge internals. So we attempt to
947 * keep as much of the original configuration as possible, but tweak it
948 * when it's found to be wrong.
949 *
950 * Known BIOS problems we have to work around:
951 * - I/O or memory regions not configured
952 * - regions configured, but not enabled in the command register
953 * - bogus I/O addresses above 64K used
954 * - expansion ROMs left enabled (this may sound harmless, but given
955 * the fact the PCI specs explicitly allow address decoders to be
956 * shared between expansion ROMs and other resource regions, it's
957 * at least dangerous)
958 *
959 * Our solution:
960 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
961 * This gives us fixed barriers on where we can allocate.
962 * (2) Allocate resources for all enabled devices. If there is
963 * a collision, just mark the resource as unallocated. Also
964 * disable expansion ROMs during this step.
965 * (3) Try to allocate resources for disabled devices. If the
966 * resources were assigned correctly, everything goes well,
967 * if they weren't, they won't disturb allocation of other
968 * resources.
969 * (4) Assign new addresses to resources which were either
970 * not configured at all or misconfigured. If explicitly
971 * requested by the user, configure expansion ROM address
972 * as well.
973 */
974
/* Recursively claim the bridge windows of every bus in bus_list against
 * their parent resources (step (1) of the strategy described above).
 * Windows that cannot be claimed get their flags cleared so they are
 * re-assigned later.
 */
static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
{
	struct pci_bus *bus;
	int i;
	struct resource *res, *pr;

	/* Depth-First Search on bus tree */
	list_for_each_entry(bus, bus_list, node) {
		for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
			if ((res = bus->resource[i]) == NULL || !res->flags
			    || res->start > res->end)
				continue;
			if (bus->parent == NULL)
				/* Root bus windows claim against the global
				 * I/O or memory space.
				 */
				pr = (res->flags & IORESOURCE_IO) ?
					&ioport_resource : &iomem_resource;
			else {
				/* Don't bother with non-root busses when
				 * re-assigning all resources. We clear the
				 * resource flags as if they were colliding
				 * and as such ensure proper re-allocation
				 * later.
				 */
				if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_RSRC)
					goto clear_resource;
				pr = pci_find_parent_resource(bus->self, res);
				if (pr == res) {
					/* this happens when the generic PCI
					 * code (wrongly) decides that this
					 * bridge is transparent -- paulus
					 */
					continue;
				}
			}

			DBG("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
			    "[0x%x], parent %p (%s)\n",
			    bus->self ? pci_name(bus->self) : "PHB",
			    bus->number, i,
			    (unsigned long long)res->start,
			    (unsigned long long)res->end,
			    (unsigned int)res->flags,
			    pr, (pr && pr->name) ? pr->name : "nil");

			if (pr && !(pr->flags & IORESOURCE_UNSET)) {
				if (request_resource(pr, res) == 0)
					continue;
				/*
				 * Must be a conflict with an existing entry.
				 * Move that entry (or entries) under the
				 * bridge resource and try again.
				 */
				if (reparent_resources(pr, res) == 0)
					continue;
			}
			printk(KERN_WARNING
			       "PCI: Cannot allocate resource region "
			       "%d of PCI bridge %d, will remap\n",
			       i, bus->number);
clear_resource:
			res->flags = 0;
		}
		pcibios_allocate_bus_resources(&bus->children);
	}
}
1039
1040static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
1041{
1042 struct resource *pr, *r = &dev->resource[idx];
1043
1044 DBG("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
1045 pci_name(dev), idx,
1046 (unsigned long long)r->start,
1047 (unsigned long long)r->end,
1048 (unsigned int)r->flags);
1049
1050 pr = pci_find_parent_resource(dev, r);
1051 if (!pr || (pr->flags & IORESOURCE_UNSET) ||
1052 request_resource(pr, r) < 0) {
1053 printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
1054 " of device %s, will remap\n", idx, pci_name(dev));
1055 if (pr)
1056 DBG("PCI: parent is %p: %016llx-%016llx [%x]\n", pr,
1057 (unsigned long long)pr->start,
1058 (unsigned long long)pr->end,
1059 (unsigned int)pr->flags);
1060 /* We'll assign a new address later */
1061 r->flags |= IORESOURCE_UNSET;
1062 r->end -= r->start;
1063 r->start = 0;
1064 }
1065}
1066
/* Claim device BARs in two passes: pass 0 handles devices whose decoder
 * is enabled in the command register, pass 1 handles disabled ones.
 * Pass 0 also switches off any enabled expansion ROM.
 */
static void __init pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx < 6; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			/* pass (0 or 1) selects which decoder state we
			 * handle this time around.
			 */
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags & IORESOURCE_ROM_ENABLE) {
			/* Turn the ROM off, leave the resource region,
			 * but keep it unregistered.
			 */
			u32 reg;
			DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
			r->flags &= ~IORESOURCE_ROM_ENABLE;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			pci_write_config_dword(dev, dev->rom_base_reg,
					       reg & ~PCI_ROM_ADDRESS_ENABLE);
		}
	}
}
1105
1106void __init pcibios_resource_survey(void)
1107{
1108 /* Allocate and assign resources. If we re-assign everything, then
1109 * we skip the allocate phase
1110 */
1111 pcibios_allocate_bus_resources(&pci_root_buses);
1112
1113 if (!(ppc_pci_flags & PPC_PCI_REASSIGN_ALL_RSRC)) {
1114 pcibios_allocate_resources(0);
1115 pcibios_allocate_resources(1);
1116 }
1117
1118 if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
1119 DBG("PCI: Assigning unassigned resouces...\n");
1120 pci_assign_unassigned_resources();
1121 }
1122
1123 /* Call machine dependent fixup */
1124 if (ppc_md.pcibios_fixup)
1125 ppc_md.pcibios_fixup();
1126}
1127
#ifdef CONFIG_HOTPLUG
/* This is used by the pSeries hotplug driver to allocate resource
 * of newly plugged busses. We can try to consolidate with the
 * rest of the code later, for now, keep it as-is
 */
void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int idx;

		for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
			struct resource *res = &dev->resource[idx];

			/* Skip resources already claimed or never set up */
			if (res->parent || !res->start || !res->flags)
				continue;
			pci_claim_resource(dev, idx);
		}
	}

	/* Recurse into child busses */
	list_for_each_entry(child, &bus->children, node)
		pcibios_claim_one_bus(child);
}
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
#endif /* CONFIG_HOTPLUG */
1155
1156int pcibios_enable_device(struct pci_dev *dev, int mask)
1157{
1158 u16 cmd, old_cmd;
1159 int idx;
1160 struct resource *r;
1161
1162 if (ppc_md.pcibios_enable_device_hook)
1163 if (ppc_md.pcibios_enable_device_hook(dev))
1164 return -EINVAL;
1165
1166 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1167 old_cmd = cmd;
1168 for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
1169 /* Only set up the requested stuff */
1170 if (!(mask & (1 << idx)))
1171 continue;
1172 r = &dev->resource[idx];
1173 if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
1174 continue;
1175 if ((idx == PCI_ROM_RESOURCE) &&
1176 (!(r->flags & IORESOURCE_ROM_ENABLE)))
1177 continue;
1178 if (r->parent == NULL) {
1179 printk(KERN_ERR "PCI: Device %s not available because"
1180 " of resource collisions\n", pci_name(dev));
1181 return -EINVAL;
1182 }
1183 if (r->flags & IORESOURCE_IO)
1184 cmd |= PCI_COMMAND_IO;
1185 if (r->flags & IORESOURCE_MEM)
1186 cmd |= PCI_COMMAND_MEMORY;
1187 }
1188 if (cmd != old_cmd) {
1189 printk("PCI: Enabling device %s (%04x -> %04x)\n",
1190 pci_name(dev), old_cmd, cmd);
1191 pci_write_config_word(dev, PCI_COMMAND, cmd);
1192 }
1193 return 0;
1194}
1195
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 0e2bee46304c..88db4ffaf11c 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -13,6 +13,7 @@
13#include <linux/bootmem.h> 13#include <linux/bootmem.h>
14#include <linux/irq.h> 14#include <linux/irq.h>
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/of.h>
16 17
17#include <asm/processor.h> 18#include <asm/processor.h>
18#include <asm/io.h> 19#include <asm/io.h>
@@ -32,19 +33,12 @@
32#endif 33#endif
33 34
34unsigned long isa_io_base = 0; 35unsigned long isa_io_base = 0;
35unsigned long isa_mem_base = 0;
36unsigned long pci_dram_offset = 0; 36unsigned long pci_dram_offset = 0;
37int pcibios_assign_bus_offset = 1; 37int pcibios_assign_bus_offset = 1;
38 38
39void pcibios_make_OF_bus_map(void); 39void pcibios_make_OF_bus_map(void);
40 40
41static int pci_relocate_bridge_resource(struct pci_bus *bus, int i);
42static int probe_resource(struct pci_bus *parent, struct resource *pr,
43 struct resource *res, struct resource **conflict);
44static void update_bridge_base(struct pci_bus *bus, int i);
45static void pcibios_fixup_resources(struct pci_dev* dev);
46static void fixup_broken_pcnet32(struct pci_dev* dev); 41static void fixup_broken_pcnet32(struct pci_dev* dev);
47static int reparent_resources(struct resource *parent, struct resource *res);
48static void fixup_cpc710_pci64(struct pci_dev* dev); 42static void fixup_cpc710_pci64(struct pci_dev* dev);
49#ifdef CONFIG_PPC_OF 43#ifdef CONFIG_PPC_OF
50static u8* pci_to_OF_bus_map; 44static u8* pci_to_OF_bus_map;
@@ -53,7 +47,7 @@ static u8* pci_to_OF_bus_map;
53/* By default, we don't re-assign bus numbers. We do this only on 47/* By default, we don't re-assign bus numbers. We do this only on
54 * some pmacs 48 * some pmacs
55 */ 49 */
56int pci_assign_all_buses; 50static int pci_assign_all_buses;
57 51
58LIST_HEAD(hose_list); 52LIST_HEAD(hose_list);
59 53
@@ -100,505 +94,6 @@ fixup_cpc710_pci64(struct pci_dev* dev)
100} 94}
101DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64); 95DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64);
102 96
103static void
104pcibios_fixup_resources(struct pci_dev *dev)
105{
106 struct pci_controller* hose = (struct pci_controller *)dev->sysdata;
107 int i;
108 unsigned long offset;
109
110 if (!hose) {
111 printk(KERN_ERR "No hose for PCI dev %s!\n", pci_name(dev));
112 return;
113 }
114 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
115 struct resource *res = dev->resource + i;
116 if (!res->flags)
117 continue;
118 if (res->end == 0xffffffff) {
119 DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
120 pci_name(dev), i, (u64)res->start, (u64)res->end);
121 res->end -= res->start;
122 res->start = 0;
123 res->flags |= IORESOURCE_UNSET;
124 continue;
125 }
126 offset = 0;
127 if (res->flags & IORESOURCE_MEM) {
128 offset = hose->pci_mem_offset;
129 } else if (res->flags & IORESOURCE_IO) {
130 offset = (unsigned long) hose->io_base_virt
131 - isa_io_base;
132 }
133 if (offset != 0) {
134 res->start += offset;
135 res->end += offset;
136 DBG("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
137 i, res->flags, pci_name(dev),
138 (u64)res->start - offset, (u64)res->start);
139 }
140 }
141
142 /* Call machine specific resource fixup */
143 if (ppc_md.pcibios_fixup_resources)
144 ppc_md.pcibios_fixup_resources(dev);
145}
146DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
147
148void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
149 struct resource *res)
150{
151 unsigned long offset = 0;
152 struct pci_controller *hose = dev->sysdata;
153
154 if (hose && res->flags & IORESOURCE_IO)
155 offset = (unsigned long)hose->io_base_virt - isa_io_base;
156 else if (hose && res->flags & IORESOURCE_MEM)
157 offset = hose->pci_mem_offset;
158 region->start = res->start - offset;
159 region->end = res->end - offset;
160}
161EXPORT_SYMBOL(pcibios_resource_to_bus);
162
163void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
164 struct pci_bus_region *region)
165{
166 unsigned long offset = 0;
167 struct pci_controller *hose = dev->sysdata;
168
169 if (hose && res->flags & IORESOURCE_IO)
170 offset = (unsigned long)hose->io_base_virt - isa_io_base;
171 else if (hose && res->flags & IORESOURCE_MEM)
172 offset = hose->pci_mem_offset;
173 res->start = region->start + offset;
174 res->end = region->end + offset;
175}
176EXPORT_SYMBOL(pcibios_bus_to_resource);
177
178/*
179 * We need to avoid collisions with `mirrored' VGA ports
180 * and other strange ISA hardware, so we always want the
181 * addresses to be allocated in the 0x000-0x0ff region
182 * modulo 0x400.
183 *
184 * Why? Because some silly external IO cards only decode
185 * the low 10 bits of the IO address. The 0x00-0xff region
186 * is reserved for motherboard devices that decode all 16
187 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
188 * but we want to try to avoid allocating at 0x2900-0x2bff
189 * which might have be mirrored at 0x0100-0x03ff..
190 */
191void pcibios_align_resource(void *data, struct resource *res,
192 resource_size_t size, resource_size_t align)
193{
194 struct pci_dev *dev = data;
195
196 if (res->flags & IORESOURCE_IO) {
197 resource_size_t start = res->start;
198
199 if (size > 0x100) {
200 printk(KERN_ERR "PCI: I/O Region %s/%d too large"
201 " (%lld bytes)\n", pci_name(dev),
202 dev->resource - res, (unsigned long long)size);
203 }
204
205 if (start & 0x300) {
206 start = (start + 0x3ff) & ~0x3ff;
207 res->start = start;
208 }
209 }
210}
211EXPORT_SYMBOL(pcibios_align_resource);
212
213/*
214 * Handle resources of PCI devices. If the world were perfect, we could
215 * just allocate all the resource regions and do nothing more. It isn't.
216 * On the other hand, we cannot just re-allocate all devices, as it would
217 * require us to know lots of host bridge internals. So we attempt to
218 * keep as much of the original configuration as possible, but tweak it
219 * when it's found to be wrong.
220 *
221 * Known BIOS problems we have to work around:
222 * - I/O or memory regions not configured
223 * - regions configured, but not enabled in the command register
224 * - bogus I/O addresses above 64K used
225 * - expansion ROMs left enabled (this may sound harmless, but given
226 * the fact the PCI specs explicitly allow address decoders to be
227 * shared between expansion ROMs and other resource regions, it's
228 * at least dangerous)
229 *
230 * Our solution:
231 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
232 * This gives us fixed barriers on where we can allocate.
233 * (2) Allocate resources for all enabled devices. If there is
234 * a collision, just mark the resource as unallocated. Also
235 * disable expansion ROMs during this step.
236 * (3) Try to allocate resources for disabled devices. If the
237 * resources were assigned correctly, everything goes well,
238 * if they weren't, they won't disturb allocation of other
239 * resources.
240 * (4) Assign new addresses to resources which were either
241 * not configured at all or misconfigured. If explicitly
242 * requested by the user, configure expansion ROM address
243 * as well.
244 */
245
246static void __init
247pcibios_allocate_bus_resources(struct list_head *bus_list)
248{
249 struct pci_bus *bus;
250 int i;
251 struct resource *res, *pr;
252
253 /* Depth-First Search on bus tree */
254 list_for_each_entry(bus, bus_list, node) {
255 for (i = 0; i < 4; ++i) {
256 if ((res = bus->resource[i]) == NULL || !res->flags
257 || res->start > res->end)
258 continue;
259 if (bus->parent == NULL)
260 pr = (res->flags & IORESOURCE_IO)?
261 &ioport_resource: &iomem_resource;
262 else {
263 pr = pci_find_parent_resource(bus->self, res);
264 if (pr == res) {
265 /* this happens when the generic PCI
266 * code (wrongly) decides that this
267 * bridge is transparent -- paulus
268 */
269 continue;
270 }
271 }
272
273 DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
274 (u64)res->start, (u64)res->end, res->flags, pr);
275 if (pr) {
276 if (request_resource(pr, res) == 0)
277 continue;
278 /*
279 * Must be a conflict with an existing entry.
280 * Move that entry (or entries) under the
281 * bridge resource and try again.
282 */
283 if (reparent_resources(pr, res) == 0)
284 continue;
285 }
286 printk(KERN_ERR "PCI: Cannot allocate resource region "
287 "%d of PCI bridge %d\n", i, bus->number);
288 if (pci_relocate_bridge_resource(bus, i))
289 bus->resource[i] = NULL;
290 }
291 pcibios_allocate_bus_resources(&bus->children);
292 }
293}
294
295/*
296 * Reparent resource children of pr that conflict with res
297 * under res, and make res replace those children.
298 */
299static int __init
300reparent_resources(struct resource *parent, struct resource *res)
301{
302 struct resource *p, **pp;
303 struct resource **firstpp = NULL;
304
305 for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
306 if (p->end < res->start)
307 continue;
308 if (res->end < p->start)
309 break;
310 if (p->start < res->start || p->end > res->end)
311 return -1; /* not completely contained */
312 if (firstpp == NULL)
313 firstpp = pp;
314 }
315 if (firstpp == NULL)
316 return -1; /* didn't find any conflicting entries? */
317 res->parent = parent;
318 res->child = *firstpp;
319 res->sibling = *pp;
320 *firstpp = res;
321 *pp = NULL;
322 for (p = res->child; p != NULL; p = p->sibling) {
323 p->parent = res;
324 DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
325 p->name, (u64)p->start, (u64)p->end, res->name);
326 }
327 return 0;
328}
329
330/*
331 * A bridge has been allocated a range which is outside the range
332 * of its parent bridge, so it needs to be moved.
333 */
334static int __init
335pci_relocate_bridge_resource(struct pci_bus *bus, int i)
336{
337 struct resource *res, *pr, *conflict;
338 unsigned long try, size;
339 int j;
340 struct pci_bus *parent = bus->parent;
341
342 if (parent == NULL) {
343 /* shouldn't ever happen */
344 printk(KERN_ERR "PCI: can't move host bridge resource\n");
345 return -1;
346 }
347 res = bus->resource[i];
348 if (res == NULL)
349 return -1;
350 pr = NULL;
351 for (j = 0; j < 4; j++) {
352 struct resource *r = parent->resource[j];
353 if (!r)
354 continue;
355 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
356 continue;
357 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) {
358 pr = r;
359 break;
360 }
361 if (res->flags & IORESOURCE_PREFETCH)
362 pr = r;
363 }
364 if (pr == NULL)
365 return -1;
366 size = res->end - res->start;
367 if (pr->start > pr->end || size > pr->end - pr->start)
368 return -1;
369 try = pr->end;
370 for (;;) {
371 res->start = try - size;
372 res->end = try;
373 if (probe_resource(bus->parent, pr, res, &conflict) == 0)
374 break;
375 if (conflict->start <= pr->start + size)
376 return -1;
377 try = conflict->start - 1;
378 }
379 if (request_resource(pr, res)) {
380 DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
381 (u64)res->start, (u64)res->end);
382 return -1; /* "can't happen" */
383 }
384 update_bridge_base(bus, i);
385 printk(KERN_INFO "PCI: bridge %d resource %d moved to %llx..%llx\n",
386 bus->number, i, (unsigned long long)res->start,
387 (unsigned long long)res->end);
388 return 0;
389}
390
391static int __init
392probe_resource(struct pci_bus *parent, struct resource *pr,
393 struct resource *res, struct resource **conflict)
394{
395 struct pci_bus *bus;
396 struct pci_dev *dev;
397 struct resource *r;
398 int i;
399
400 for (r = pr->child; r != NULL; r = r->sibling) {
401 if (r->end >= res->start && res->end >= r->start) {
402 *conflict = r;
403 return 1;
404 }
405 }
406 list_for_each_entry(bus, &parent->children, node) {
407 for (i = 0; i < 4; ++i) {
408 if ((r = bus->resource[i]) == NULL)
409 continue;
410 if (!r->flags || r->start > r->end || r == res)
411 continue;
412 if (pci_find_parent_resource(bus->self, r) != pr)
413 continue;
414 if (r->end >= res->start && res->end >= r->start) {
415 *conflict = r;
416 return 1;
417 }
418 }
419 }
420 list_for_each_entry(dev, &parent->devices, bus_list) {
421 for (i = 0; i < 6; ++i) {
422 r = &dev->resource[i];
423 if (!r->flags || (r->flags & IORESOURCE_UNSET))
424 continue;
425 if (pci_find_parent_resource(dev, r) != pr)
426 continue;
427 if (r->end >= res->start && res->end >= r->start) {
428 *conflict = r;
429 return 1;
430 }
431 }
432 }
433 return 0;
434}
435
436void __init
437update_bridge_resource(struct pci_dev *dev, struct resource *res)
438{
439 u8 io_base_lo, io_limit_lo;
440 u16 mem_base, mem_limit;
441 u16 cmd;
442 unsigned long start, end, off;
443 struct pci_controller *hose = dev->sysdata;
444
445 if (!hose) {
446 printk("update_bridge_base: no hose?\n");
447 return;
448 }
449 pci_read_config_word(dev, PCI_COMMAND, &cmd);
450 pci_write_config_word(dev, PCI_COMMAND,
451 cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY));
452 if (res->flags & IORESOURCE_IO) {
453 off = (unsigned long) hose->io_base_virt - isa_io_base;
454 start = res->start - off;
455 end = res->end - off;
456 io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK;
457 io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK;
458 if (end > 0xffff)
459 io_base_lo |= PCI_IO_RANGE_TYPE_32;
460 else
461 io_base_lo |= PCI_IO_RANGE_TYPE_16;
462 pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
463 start >> 16);
464 pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
465 end >> 16);
466 pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo);
467 pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo);
468
469 } else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
470 == IORESOURCE_MEM) {
471 off = hose->pci_mem_offset;
472 mem_base = ((res->start - off) >> 16) & PCI_MEMORY_RANGE_MASK;
473 mem_limit = ((res->end - off) >> 16) & PCI_MEMORY_RANGE_MASK;
474 pci_write_config_word(dev, PCI_MEMORY_BASE, mem_base);
475 pci_write_config_word(dev, PCI_MEMORY_LIMIT, mem_limit);
476
477 } else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
478 == (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
479 off = hose->pci_mem_offset;
480 mem_base = ((res->start - off) >> 16) & PCI_PREF_RANGE_MASK;
481 mem_limit = ((res->end - off) >> 16) & PCI_PREF_RANGE_MASK;
482 pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, mem_base);
483 pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, mem_limit);
484
485 } else {
486 DBG(KERN_ERR "PCI: ugh, bridge %s res has flags=%lx\n",
487 pci_name(dev), res->flags);
488 }
489 pci_write_config_word(dev, PCI_COMMAND, cmd);
490}
491
492static void __init
493update_bridge_base(struct pci_bus *bus, int i)
494{
495 struct resource *res = bus->resource[i];
496 struct pci_dev *dev = bus->self;
497 update_bridge_resource(dev, res);
498}
499
500static inline void alloc_resource(struct pci_dev *dev, int idx)
501{
502 struct resource *pr, *r = &dev->resource[idx];
503
504 DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
505 pci_name(dev), idx, (u64)r->start, (u64)r->end, r->flags);
506 pr = pci_find_parent_resource(dev, r);
507 if (!pr || request_resource(pr, r) < 0) {
508 printk(KERN_ERR "PCI: Cannot allocate resource region %d"
509 " of device %s\n", idx, pci_name(dev));
510 if (pr)
511 DBG("PCI: parent is %p: %016llx-%016llx (f=%lx)\n",
512 pr, (u64)pr->start, (u64)pr->end, pr->flags);
513 /* We'll assign a new address later */
514 r->flags |= IORESOURCE_UNSET;
515 r->end -= r->start;
516 r->start = 0;
517 }
518}
519
520static void __init
521pcibios_allocate_resources(int pass)
522{
523 struct pci_dev *dev = NULL;
524 int idx, disabled;
525 u16 command;
526 struct resource *r;
527
528 for_each_pci_dev(dev) {
529 pci_read_config_word(dev, PCI_COMMAND, &command);
530 for (idx = 0; idx < 6; idx++) {
531 r = &dev->resource[idx];
532 if (r->parent) /* Already allocated */
533 continue;
534 if (!r->flags || (r->flags & IORESOURCE_UNSET))
535 continue; /* Not assigned at all */
536 if (r->flags & IORESOURCE_IO)
537 disabled = !(command & PCI_COMMAND_IO);
538 else
539 disabled = !(command & PCI_COMMAND_MEMORY);
540 if (pass == disabled)
541 alloc_resource(dev, idx);
542 }
543 if (pass)
544 continue;
545 r = &dev->resource[PCI_ROM_RESOURCE];
546 if (r->flags & IORESOURCE_ROM_ENABLE) {
547 /* Turn the ROM off, leave the resource region, but keep it unregistered. */
548 u32 reg;
549 DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
550 r->flags &= ~IORESOURCE_ROM_ENABLE;
551 pci_read_config_dword(dev, dev->rom_base_reg, &reg);
552 pci_write_config_dword(dev, dev->rom_base_reg,
553 reg & ~PCI_ROM_ADDRESS_ENABLE);
554 }
555 }
556}
557
558static void __init
559pcibios_assign_resources(void)
560{
561 struct pci_dev *dev = NULL;
562 int idx;
563 struct resource *r;
564
565 for_each_pci_dev(dev) {
566 int class = dev->class >> 8;
567
568 /* Don't touch classless devices and host bridges */
569 if (!class || class == PCI_CLASS_BRIDGE_HOST)
570 continue;
571
572 for (idx = 0; idx < 6; idx++) {
573 r = &dev->resource[idx];
574
575 /*
576 * We shall assign a new address to this resource,
577 * either because the BIOS (sic) forgot to do so
578 * or because we have decided the old address was
579 * unusable for some reason.
580 */
581 if ((r->flags & IORESOURCE_UNSET) && r->end &&
582 (!ppc_md.pcibios_enable_device_hook ||
583 !ppc_md.pcibios_enable_device_hook(dev, 1))) {
584 int rc;
585
586 r->flags &= ~IORESOURCE_UNSET;
587 rc = pci_assign_resource(dev, idx);
588 BUG_ON(rc);
589 }
590 }
591
592#if 0 /* don't assign ROMs */
593 r = &dev->resource[PCI_ROM_RESOURCE];
594 r->end -= r->start;
595 r->start = 0;
596 if (r->end)
597 pci_assign_resource(dev, PCI_ROM_RESOURCE);
598#endif
599 }
600}
601
602#ifdef CONFIG_PPC_OF 97#ifdef CONFIG_PPC_OF
603/* 98/*
604 * Functions below are used on OpenFirmware machines. 99 * Functions below are used on OpenFirmware machines.
@@ -619,7 +114,7 @@ make_one_node_map(struct device_node* node, u8 pci_bus)
619 } else 114 } else
620 pci_to_OF_bus_map[pci_bus] = bus_range[0]; 115 pci_to_OF_bus_map[pci_bus] = bus_range[0];
621 116
622 for (node=node->child; node != 0;node = node->sibling) { 117 for_each_child_of_node(node, node) {
623 struct pci_dev* dev; 118 struct pci_dev* dev;
624 const unsigned int *class_code, *reg; 119 const unsigned int *class_code, *reg;
625 120
@@ -662,8 +157,8 @@ pcibios_make_OF_bus_map(void)
662 157
663 /* For each hose, we begin searching bridges */ 158 /* For each hose, we begin searching bridges */
664 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 159 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
665 struct device_node* node; 160 struct device_node* node = hose->dn;
666 node = (struct device_node *)hose->arch_data; 161
667 if (!node) 162 if (!node)
668 continue; 163 continue;
669 make_one_node_map(node, hose->first_busno); 164 make_one_node_map(node, hose->first_busno);
@@ -688,15 +183,18 @@ pcibios_make_OF_bus_map(void)
688typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data); 183typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
689 184
690static struct device_node* 185static struct device_node*
691scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data) 186scan_OF_pci_childs(struct device_node *parent, pci_OF_scan_iterator filter, void* data)
692{ 187{
188 struct device_node *node;
693 struct device_node* sub_node; 189 struct device_node* sub_node;
694 190
695 for (; node != 0;node = node->sibling) { 191 for_each_child_of_node(parent, node) {
696 const unsigned int *class_code; 192 const unsigned int *class_code;
697 193
698 if (filter(node, data)) 194 if (filter(node, data)) {
195 of_node_put(node);
699 return node; 196 return node;
197 }
700 198
701 /* For PCI<->PCI bridges or CardBus bridges, we go down 199 /* For PCI<->PCI bridges or CardBus bridges, we go down
702 * Note: some OFs create a parent node "multifunc-device" as 200 * Note: some OFs create a parent node "multifunc-device" as
@@ -708,9 +206,11 @@ scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void*
708 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) && 206 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
709 strcmp(node->name, "multifunc-device")) 207 strcmp(node->name, "multifunc-device"))
710 continue; 208 continue;
711 sub_node = scan_OF_pci_childs(node->child, filter, data); 209 sub_node = scan_OF_pci_childs(node, filter, data);
712 if (sub_node) 210 if (sub_node) {
211 of_node_put(node);
713 return sub_node; 212 return sub_node;
213 }
714 } 214 }
715 return NULL; 215 return NULL;
716} 216}
@@ -718,11 +218,11 @@ scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void*
718static struct device_node *scan_OF_for_pci_dev(struct device_node *parent, 218static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
719 unsigned int devfn) 219 unsigned int devfn)
720{ 220{
721 struct device_node *np = NULL; 221 struct device_node *np;
722 const u32 *reg; 222 const u32 *reg;
723 unsigned int psize; 223 unsigned int psize;
724 224
725 while ((np = of_get_next_child(parent, np)) != NULL) { 225 for_each_child_of_node(parent, np) {
726 reg = of_get_property(np, "reg", &psize); 226 reg = of_get_property(np, "reg", &psize);
727 if (reg == NULL || psize < 4) 227 if (reg == NULL || psize < 4)
728 continue; 228 continue;
@@ -742,7 +242,7 @@ static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
742 struct pci_controller *hose = pci_bus_to_host(bus); 242 struct pci_controller *hose = pci_bus_to_host(bus);
743 if (hose == NULL) 243 if (hose == NULL)
744 return NULL; 244 return NULL;
745 return of_node_get(hose->arch_data); 245 return of_node_get(hose->dn);
746 } 246 }
747 247
748 /* not a root bus, we need to get our parent */ 248 /* not a root bus, we need to get our parent */
@@ -812,9 +312,9 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
812 return -ENODEV; 312 return -ENODEV;
813 /* Make sure it's really a PCI device */ 313 /* Make sure it's really a PCI device */
814 hose = pci_find_hose_for_OF_device(node); 314 hose = pci_find_hose_for_OF_device(node);
815 if (!hose || !hose->arch_data) 315 if (!hose || !hose->dn)
816 return -ENODEV; 316 return -ENODEV;
817 if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child, 317 if (!scan_OF_pci_childs(hose->dn,
818 find_OF_pci_device_filter, (void *)node)) 318 find_OF_pci_device_filter, (void *)node))
819 return -ENODEV; 319 return -ENODEV;
820 reg = of_get_property(node, "reg", NULL); 320 reg = of_get_property(node, "reg", NULL);
@@ -843,120 +343,6 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
843} 343}
844EXPORT_SYMBOL(pci_device_from_OF_node); 344EXPORT_SYMBOL(pci_device_from_OF_node);
845 345
846void __init
847pci_process_bridge_OF_ranges(struct pci_controller *hose,
848 struct device_node *dev, int primary)
849{
850 static unsigned int static_lc_ranges[256] __initdata;
851 const unsigned int *dt_ranges;
852 unsigned int *lc_ranges, *ranges, *prev, size;
853 int rlen = 0, orig_rlen;
854 int memno = 0;
855 struct resource *res;
856 int np, na = of_n_addr_cells(dev);
857 np = na + 5;
858
859 /* First we try to merge ranges to fix a problem with some pmacs
860 * that can have more than 3 ranges, fortunately using contiguous
861 * addresses -- BenH
862 */
863 dt_ranges = of_get_property(dev, "ranges", &rlen);
864 if (!dt_ranges)
865 return;
866 /* Sanity check, though hopefully that never happens */
867 if (rlen > sizeof(static_lc_ranges)) {
868 printk(KERN_WARNING "OF ranges property too large !\n");
869 rlen = sizeof(static_lc_ranges);
870 }
871 lc_ranges = static_lc_ranges;
872 memcpy(lc_ranges, dt_ranges, rlen);
873 orig_rlen = rlen;
874
875 /* Let's work on a copy of the "ranges" property instead of damaging
876 * the device-tree image in memory
877 */
878 ranges = lc_ranges;
879 prev = NULL;
880 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
881 if (prev) {
882 if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
883 (prev[2] + prev[na+4]) == ranges[2] &&
884 (prev[na+2] + prev[na+4]) == ranges[na+2]) {
885 prev[na+4] += ranges[na+4];
886 ranges[0] = 0;
887 ranges += np;
888 continue;
889 }
890 }
891 prev = ranges;
892 ranges += np;
893 }
894
895 /*
896 * The ranges property is laid out as an array of elements,
897 * each of which comprises:
898 * cells 0 - 2: a PCI address
899 * cells 3 or 3+4: a CPU physical address
900 * (size depending on dev->n_addr_cells)
901 * cells 4+5 or 5+6: the size of the range
902 */
903 ranges = lc_ranges;
904 rlen = orig_rlen;
905 while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
906 res = NULL;
907 size = ranges[na+4];
908 switch ((ranges[0] >> 24) & 0x3) {
909 case 1: /* I/O space */
910 if (ranges[2] != 0)
911 break;
912 hose->io_base_phys = ranges[na+2];
913 /* limit I/O space to 16MB */
914 if (size > 0x01000000)
915 size = 0x01000000;
916 hose->io_base_virt = ioremap(ranges[na+2], size);
917 if (primary)
918 isa_io_base = (unsigned long) hose->io_base_virt;
919 res = &hose->io_resource;
920 res->flags = IORESOURCE_IO;
921 res->start = ranges[2];
922 DBG("PCI: IO 0x%llx -> 0x%llx\n",
923 (u64)res->start, (u64)res->start + size - 1);
924 break;
925 case 2: /* memory space */
926 memno = 0;
927 if (ranges[1] == 0 && ranges[2] == 0
928 && ranges[na+4] <= (16 << 20)) {
929 /* 1st 16MB, i.e. ISA memory area */
930 if (primary)
931 isa_mem_base = ranges[na+2];
932 memno = 1;
933 }
934 while (memno < 3 && hose->mem_resources[memno].flags)
935 ++memno;
936 if (memno == 0)
937 hose->pci_mem_offset = ranges[na+2] - ranges[2];
938 if (memno < 3) {
939 res = &hose->mem_resources[memno];
940 res->flags = IORESOURCE_MEM;
941 if(ranges[0] & 0x40000000)
942 res->flags |= IORESOURCE_PREFETCH;
943 res->start = ranges[na+2];
944 DBG("PCI: MEM[%d] 0x%llx -> 0x%llx\n", memno,
945 (u64)res->start, (u64)res->start + size - 1);
946 }
947 break;
948 }
949 if (res != NULL) {
950 res->name = dev->full_name;
951 res->end = res->start + size - 1;
952 res->parent = NULL;
953 res->sibling = NULL;
954 res->child = NULL;
955 }
956 ranges += np;
957 }
958}
959
960/* We create the "pci-OF-bus-map" property now so it appears in the 346/* We create the "pci-OF-bus-map" property now so it appears in the
961 * /proc device tree 347 * /proc device tree
962 */ 348 */
@@ -986,219 +372,7 @@ void pcibios_make_OF_bus_map(void)
986} 372}
987#endif /* CONFIG_PPC_OF */ 373#endif /* CONFIG_PPC_OF */
988 374
989#ifdef CONFIG_PPC_PMAC 375static int __init pcibios_init(void)
990/*
991 * This set of routines checks for PCI<->PCI bridges that have closed
992 * IO resources and have child devices. It tries to re-open an IO
993 * window on them.
994 *
995 * This is a _temporary_ fix to workaround a problem with Apple's OF
996 * closing IO windows on P2P bridges when the OF drivers of cards
997 * below this bridge don't claim any IO range (typically ATI or
998 * Adaptec).
999 *
1000 * A more complete fix would be to use drivers/pci/setup-bus.c, which
1001 * involves a working pcibios_fixup_pbus_ranges(), some more care about
1002 * ordering when creating the host bus resources, and maybe a few more
1003 * minor tweaks
1004 */
1005
1006/* Initialize bridges with base/limit values we have collected */
1007static void __init
1008do_update_p2p_io_resource(struct pci_bus *bus, int enable_vga)
1009{
1010 struct pci_dev *bridge = bus->self;
1011 struct pci_controller* hose = (struct pci_controller *)bridge->sysdata;
1012 u32 l;
1013 u16 w;
1014 struct resource res;
1015
1016 if (bus->resource[0] == NULL)
1017 return;
1018 res = *(bus->resource[0]);
1019
1020 DBG("Remapping Bus %d, bridge: %s\n", bus->number, pci_name(bridge));
1021 res.start -= ((unsigned long) hose->io_base_virt - isa_io_base);
1022 res.end -= ((unsigned long) hose->io_base_virt - isa_io_base);
1023 DBG(" IO window: %016llx-%016llx\n", res.start, res.end);
1024
1025 /* Set up the top and bottom of the PCI I/O segment for this bus. */
1026 pci_read_config_dword(bridge, PCI_IO_BASE, &l);
1027 l &= 0xffff000f;
1028 l |= (res.start >> 8) & 0x00f0;
1029 l |= res.end & 0xf000;
1030 pci_write_config_dword(bridge, PCI_IO_BASE, l);
1031
1032 if ((l & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
1033 l = (res.start >> 16) | (res.end & 0xffff0000);
1034 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, l);
1035 }
1036
1037 pci_read_config_word(bridge, PCI_COMMAND, &w);
1038 w |= PCI_COMMAND_IO;
1039 pci_write_config_word(bridge, PCI_COMMAND, w);
1040
1041#if 0 /* Enabling this causes XFree 4.2.0 to hang during PCI probe */
1042 if (enable_vga) {
1043 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &w);
1044 w |= PCI_BRIDGE_CTL_VGA;
1045 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, w);
1046 }
1047#endif
1048}
1049
1050/* This function is pretty basic and actually quite broken for the
1051 * general case, it's enough for us right now though. It's supposed
1052 * to tell us if we need to open an IO range at all or not and what
1053 * size.
1054 */
1055static int __init
1056check_for_io_childs(struct pci_bus *bus, struct resource* res, int *found_vga)
1057{
1058 struct pci_dev *dev;
1059 int i;
1060 int rc = 0;
1061
1062#define push_end(res, mask) do { \
1063 BUG_ON((mask+1) & mask); \
1064 res->end = (res->end + mask) | mask; \
1065} while (0)
1066
1067 list_for_each_entry(dev, &bus->devices, bus_list) {
1068 u16 class = dev->class >> 8;
1069
1070 if (class == PCI_CLASS_DISPLAY_VGA ||
1071 class == PCI_CLASS_NOT_DEFINED_VGA)
1072 *found_vga = 1;
1073 if (class >> 8 == PCI_BASE_CLASS_BRIDGE && dev->subordinate)
1074 rc |= check_for_io_childs(dev->subordinate, res, found_vga);
1075 if (class == PCI_CLASS_BRIDGE_CARDBUS)
1076 push_end(res, 0xfff);
1077
1078 for (i=0; i<PCI_NUM_RESOURCES; i++) {
1079 struct resource *r;
1080 unsigned long r_size;
1081
1082 if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI
1083 && i >= PCI_BRIDGE_RESOURCES)
1084 continue;
1085 r = &dev->resource[i];
1086 r_size = r->end - r->start;
1087 if (r_size < 0xfff)
1088 r_size = 0xfff;
1089 if (r->flags & IORESOURCE_IO && (r_size) != 0) {
1090 rc = 1;
1091 push_end(res, r_size);
1092 }
1093 }
1094 }
1095
1096 return rc;
1097}
1098
1099/* Here we scan all P2P bridges of a given level that have a closed
1100 * IO window. Note that the test for the presence of a VGA card should
1101 * be improved to take into account already configured P2P bridges,
1102 * currently, we don't see them and might end up configuring 2 bridges
1103 * with VGA pass through enabled
1104 */
1105static void __init
1106do_fixup_p2p_level(struct pci_bus *bus)
1107{
1108 struct pci_bus *b;
1109 int i, parent_io;
1110 int has_vga = 0;
1111
1112 for (parent_io=0; parent_io<4; parent_io++)
1113 if (bus->resource[parent_io]
1114 && bus->resource[parent_io]->flags & IORESOURCE_IO)
1115 break;
1116 if (parent_io >= 4)
1117 return;
1118
1119 list_for_each_entry(b, &bus->children, node) {
1120 struct pci_dev *d = b->self;
1121 struct pci_controller* hose = (struct pci_controller *)d->sysdata;
1122 struct resource *res = b->resource[0];
1123 struct resource tmp_res;
1124 unsigned long max;
1125 int found_vga = 0;
1126
1127 memset(&tmp_res, 0, sizeof(tmp_res));
1128 tmp_res.start = bus->resource[parent_io]->start;
1129
1130 /* We don't let low addresses go through that closed P2P bridge, well,
1131 * that may not be necessary but I feel safer that way
1132 */
1133 if (tmp_res.start == 0)
1134 tmp_res.start = 0x1000;
1135
1136 if (!list_empty(&b->devices) && res && res->flags == 0 &&
1137 res != bus->resource[parent_io] &&
1138 (d->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
1139 check_for_io_childs(b, &tmp_res, &found_vga)) {
1140 u8 io_base_lo;
1141
1142 printk(KERN_INFO "Fixing up IO bus %s\n", b->name);
1143
1144 if (found_vga) {
1145 if (has_vga) {
1146 printk(KERN_WARNING "Skipping VGA, already active"
1147 " on bus segment\n");
1148 found_vga = 0;
1149 } else
1150 has_vga = 1;
1151 }
1152 pci_read_config_byte(d, PCI_IO_BASE, &io_base_lo);
1153
1154 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32)
1155 max = ((unsigned long) hose->io_base_virt
1156 - isa_io_base) + 0xffffffff;
1157 else
1158 max = ((unsigned long) hose->io_base_virt
1159 - isa_io_base) + 0xffff;
1160
1161 *res = tmp_res;
1162 res->flags = IORESOURCE_IO;
1163 res->name = b->name;
1164
1165 /* Find a resource in the parent where we can allocate */
1166 for (i = 0 ; i < 4; i++) {
1167 struct resource *r = bus->resource[i];
1168 if (!r)
1169 continue;
1170 if ((r->flags & IORESOURCE_IO) == 0)
1171 continue;
1172 DBG("Trying to allocate from %016llx, size %016llx from parent"
1173 " res %d: %016llx -> %016llx\n",
1174 res->start, res->end, i, r->start, r->end);
1175
1176 if (allocate_resource(r, res, res->end + 1, res->start, max,
1177 res->end + 1, NULL, NULL) < 0) {
1178 DBG("Failed !\n");
1179 continue;
1180 }
1181 do_update_p2p_io_resource(b, found_vga);
1182 break;
1183 }
1184 }
1185 do_fixup_p2p_level(b);
1186 }
1187}
1188
1189static void
1190pcibios_fixup_p2p_bridges(void)
1191{
1192 struct pci_bus *b;
1193
1194 list_for_each_entry(b, &pci_root_buses, node)
1195 do_fixup_p2p_level(b);
1196}
1197
1198#endif /* CONFIG_PPC_PMAC */
1199
1200static int __init
1201pcibios_init(void)
1202{ 376{
1203 struct pci_controller *hose, *tmp; 377 struct pci_controller *hose, *tmp;
1204 struct pci_bus *bus; 378 struct pci_bus *bus;
@@ -1206,6 +380,9 @@ pcibios_init(void)
1206 380
1207 printk(KERN_INFO "PCI: Probing PCI hardware\n"); 381 printk(KERN_INFO "PCI: Probing PCI hardware\n");
1208 382
383 if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_BUS)
384 pci_assign_all_buses = 1;
385
1209 /* Scan all of the recorded PCI controllers. */ 386 /* Scan all of the recorded PCI controllers. */
1210 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 387 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
1211 if (pci_assign_all_buses) 388 if (pci_assign_all_buses)
@@ -1213,9 +390,10 @@ pcibios_init(void)
1213 hose->last_busno = 0xff; 390 hose->last_busno = 0xff;
1214 bus = pci_scan_bus_parented(hose->parent, hose->first_busno, 391 bus = pci_scan_bus_parented(hose->parent, hose->first_busno,
1215 hose->ops, hose); 392 hose->ops, hose);
1216 if (bus) 393 if (bus) {
1217 pci_bus_add_devices(bus); 394 pci_bus_add_devices(bus);
1218 hose->last_busno = bus->subordinate; 395 hose->last_busno = bus->subordinate;
396 }
1219 if (pci_assign_all_buses || next_busno <= hose->last_busno) 397 if (pci_assign_all_buses || next_busno <= hose->last_busno)
1220 next_busno = hose->last_busno + pcibios_assign_bus_offset; 398 next_busno = hose->last_busno + pcibios_assign_bus_offset;
1221 } 399 }
@@ -1228,18 +406,8 @@ pcibios_init(void)
1228 if (pci_assign_all_buses && have_of) 406 if (pci_assign_all_buses && have_of)
1229 pcibios_make_OF_bus_map(); 407 pcibios_make_OF_bus_map();
1230 408
1231 /* Call machine dependent fixup */ 409 /* Call common code to handle resource allocation */
1232 if (ppc_md.pcibios_fixup) 410 pcibios_resource_survey();
1233 ppc_md.pcibios_fixup();
1234
1235 /* Allocate and assign resources */
1236 pcibios_allocate_bus_resources(&pci_root_buses);
1237 pcibios_allocate_resources(0);
1238 pcibios_allocate_resources(1);
1239#ifdef CONFIG_PPC_PMAC
1240 pcibios_fixup_p2p_bridges();
1241#endif /* CONFIG_PPC_PMAC */
1242 pcibios_assign_resources();
1243 411
1244 /* Call machine dependent post-init code */ 412 /* Call machine dependent post-init code */
1245 if (ppc_md.pcibios_after_init) 413 if (ppc_md.pcibios_after_init)
@@ -1250,14 +418,14 @@ pcibios_init(void)
1250 418
1251subsys_initcall(pcibios_init); 419subsys_initcall(pcibios_init);
1252 420
1253void pcibios_fixup_bus(struct pci_bus *bus) 421void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
1254{ 422{
1255 struct pci_controller *hose = (struct pci_controller *) bus->sysdata; 423 struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
1256 unsigned long io_offset; 424 unsigned long io_offset;
1257 struct resource *res; 425 struct resource *res;
1258 struct pci_dev *dev;
1259 int i; 426 int i;
1260 427
428 /* Hookup PHB resources */
1261 io_offset = (unsigned long)hose->io_base_virt - isa_io_base; 429 io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
1262 if (bus->parent == NULL) { 430 if (bus->parent == NULL) {
1263 /* This is a host bridge - fill in its resources */ 431 /* This is a host bridge - fill in its resources */
@@ -1272,8 +440,8 @@ void pcibios_fixup_bus(struct pci_bus *bus)
1272 res->end = IO_SPACE_LIMIT; 440 res->end = IO_SPACE_LIMIT;
1273 res->flags = IORESOURCE_IO; 441 res->flags = IORESOURCE_IO;
1274 } 442 }
1275 res->start += io_offset; 443 res->start = (res->start + io_offset) & 0xffffffffu;
1276 res->end += io_offset; 444 res->end = (res->end + io_offset) & 0xffffffffu;
1277 445
1278 for (i = 0; i < 3; ++i) { 446 for (i = 0; i < 3; ++i) {
1279 res = &hose->mem_resources[i]; 447 res = &hose->mem_resources[i];
@@ -1288,35 +456,6 @@ void pcibios_fixup_bus(struct pci_bus *bus)
1288 } 456 }
1289 bus->resource[i+1] = res; 457 bus->resource[i+1] = res;
1290 } 458 }
1291 } else {
1292 /* This is a subordinate bridge */
1293 pci_read_bridge_bases(bus);
1294
1295 for (i = 0; i < 4; ++i) {
1296 if ((res = bus->resource[i]) == NULL)
1297 continue;
1298 if (!res->flags || bus->self->transparent)
1299 continue;
1300 if (io_offset && (res->flags & IORESOURCE_IO)) {
1301 res->start += io_offset;
1302 res->end += io_offset;
1303 } else if (hose->pci_mem_offset
1304 && (res->flags & IORESOURCE_MEM)) {
1305 res->start += hose->pci_mem_offset;
1306 res->end += hose->pci_mem_offset;
1307 }
1308 }
1309 }
1310
1311 /* Platform specific bus fixups */
1312 if (ppc_md.pcibios_fixup_bus)
1313 ppc_md.pcibios_fixup_bus(bus);
1314
1315 /* Read default IRQs and fixup if necessary */
1316 list_for_each_entry(dev, &bus->devices, bus_list) {
1317 pci_read_irq_line(dev);
1318 if (ppc_md.pci_irq_fixup)
1319 ppc_md.pci_irq_fixup(dev);
1320 } 459 }
1321} 460}
1322 461
@@ -1328,37 +467,6 @@ pcibios_update_irq(struct pci_dev *dev, int irq)
1328 /* XXX FIXME - update OF device tree node interrupt property */ 467 /* XXX FIXME - update OF device tree node interrupt property */
1329} 468}
1330 469
1331int pcibios_enable_device(struct pci_dev *dev, int mask)
1332{
1333 u16 cmd, old_cmd;
1334 int idx;
1335 struct resource *r;
1336
1337 if (ppc_md.pcibios_enable_device_hook)
1338 if (ppc_md.pcibios_enable_device_hook(dev, 0))
1339 return -EINVAL;
1340
1341 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1342 old_cmd = cmd;
1343 for (idx=0; idx<6; idx++) {
1344 r = &dev->resource[idx];
1345 if (r->flags & IORESOURCE_UNSET) {
1346 printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
1347 return -EINVAL;
1348 }
1349 if (r->flags & IORESOURCE_IO)
1350 cmd |= PCI_COMMAND_IO;
1351 if (r->flags & IORESOURCE_MEM)
1352 cmd |= PCI_COMMAND_MEMORY;
1353 }
1354 if (cmd != old_cmd) {
1355 printk("PCI: Enabling device %s (%04x -> %04x)\n",
1356 pci_name(dev), old_cmd, cmd);
1357 pci_write_config_word(dev, PCI_COMMAND, cmd);
1358 }
1359 return 0;
1360}
1361
1362static struct pci_controller* 470static struct pci_controller*
1363pci_bus_to_hose(int bus) 471pci_bus_to_hose(int bus)
1364{ 472{
@@ -1381,17 +489,6 @@ long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
1381 struct pci_controller* hose; 489 struct pci_controller* hose;
1382 long result = -EOPNOTSUPP; 490 long result = -EOPNOTSUPP;
1383 491
1384 /* Argh ! Please forgive me for that hack, but that's the
1385 * simplest way to get existing XFree to not lockup on some
1386 * G5 machines... So when something asks for bus 0 io base
1387 * (bus 0 is HT root), we return the AGP one instead.
1388 */
1389#ifdef CONFIG_PPC_PMAC
1390 if (machine_is(powermac) && machine_is_compatible("MacRISC4"))
1391 if (bus == 0)
1392 bus = 0xf0;
1393#endif /* CONFIG_PPC_PMAC */
1394
1395 hose = pci_bus_to_hose(bus); 492 hose = pci_bus_to_hose(bus);
1396 if (!hose) 493 if (!hose)
1397 return -ENODEV; 494 return -ENODEV;
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 9f63bdcb0bdf..52750745edfd 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -31,7 +31,6 @@
31#include <asm/byteorder.h> 31#include <asm/byteorder.h>
32#include <asm/machdep.h> 32#include <asm/machdep.h>
33#include <asm/ppc-pci.h> 33#include <asm/ppc-pci.h>
34#include <asm/firmware.h>
35 34
36#ifdef DEBUG 35#ifdef DEBUG
37#include <asm/udbg.h> 36#include <asm/udbg.h>
@@ -41,10 +40,6 @@
41#endif 40#endif
42 41
43unsigned long pci_probe_only = 1; 42unsigned long pci_probe_only = 1;
44int pci_assign_all_buses = 0;
45
46static void fixup_resource(struct resource *res, struct pci_dev *dev);
47static void do_bus_setup(struct pci_bus *bus);
48 43
49/* pci_io_base -- the base address from which io bars are offsets. 44/* pci_io_base -- the base address from which io bars are offsets.
50 * This is the lowest I/O base address (so bar values are always positive), 45 * This is the lowest I/O base address (so bar values are always positive),
@@ -70,139 +65,31 @@ struct dma_mapping_ops *get_pci_dma_ops(void)
70} 65}
71EXPORT_SYMBOL(get_pci_dma_ops); 66EXPORT_SYMBOL(get_pci_dma_ops);
72 67
73static void fixup_broken_pcnet32(struct pci_dev* dev)
74{
75 if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
76 dev->vendor = PCI_VENDOR_ID_AMD;
77 pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
78 }
79}
80DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
81 68
82void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, 69int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
83 struct resource *res)
84{ 70{
85 unsigned long offset = 0; 71 return dma_set_mask(&dev->dev, mask);
86 struct pci_controller *hose = pci_bus_to_host(dev->bus);
87
88 if (!hose)
89 return;
90
91 if (res->flags & IORESOURCE_IO)
92 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
93
94 if (res->flags & IORESOURCE_MEM)
95 offset = hose->pci_mem_offset;
96
97 region->start = res->start - offset;
98 region->end = res->end - offset;
99} 72}
100 73
101void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, 74int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
102 struct pci_bus_region *region)
103{ 75{
104 unsigned long offset = 0; 76 int rc;
105 struct pci_controller *hose = pci_bus_to_host(dev->bus);
106
107 if (!hose)
108 return;
109 77
110 if (res->flags & IORESOURCE_IO) 78 rc = dma_set_mask(&dev->dev, mask);
111 offset = (unsigned long)hose->io_base_virt - _IO_BASE; 79 dev->dev.coherent_dma_mask = dev->dma_mask;
112 80
113 if (res->flags & IORESOURCE_MEM) 81 return rc;
114 offset = hose->pci_mem_offset;
115
116 res->start = region->start + offset;
117 res->end = region->end + offset;
118} 82}
119 83
120#ifdef CONFIG_HOTPLUG 84static void fixup_broken_pcnet32(struct pci_dev* dev)
121EXPORT_SYMBOL(pcibios_resource_to_bus);
122EXPORT_SYMBOL(pcibios_bus_to_resource);
123#endif
124
125/*
126 * We need to avoid collisions with `mirrored' VGA ports
127 * and other strange ISA hardware, so we always want the
128 * addresses to be allocated in the 0x000-0x0ff region
129 * modulo 0x400.
130 *
131 * Why? Because some silly external IO cards only decode
132 * the low 10 bits of the IO address. The 0x00-0xff region
133 * is reserved for motherboard devices that decode all 16
134 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
135 * but we want to try to avoid allocating at 0x2900-0x2bff
136 * which might have be mirrored at 0x0100-0x03ff..
137 */
138void pcibios_align_resource(void *data, struct resource *res,
139 resource_size_t size, resource_size_t align)
140{
141 struct pci_dev *dev = data;
142 struct pci_controller *hose = pci_bus_to_host(dev->bus);
143 resource_size_t start = res->start;
144 unsigned long alignto;
145
146 if (res->flags & IORESOURCE_IO) {
147 unsigned long offset = (unsigned long)hose->io_base_virt -
148 _IO_BASE;
149 /* Make sure we start at our min on all hoses */
150 if (start - offset < PCIBIOS_MIN_IO)
151 start = PCIBIOS_MIN_IO + offset;
152
153 /*
154 * Put everything into 0x00-0xff region modulo 0x400
155 */
156 if (start & 0x300)
157 start = (start + 0x3ff) & ~0x3ff;
158
159 } else if (res->flags & IORESOURCE_MEM) {
160 /* Make sure we start at our min on all hoses */
161 if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM)
162 start = PCIBIOS_MIN_MEM + hose->pci_mem_offset;
163
164 /* Align to multiple of size of minimum base. */
165 alignto = max(0x1000UL, align);
166 start = ALIGN(start, alignto);
167 }
168
169 res->start = start;
170}
171
172void __devinit pcibios_claim_one_bus(struct pci_bus *b)
173{ 85{
174 struct pci_dev *dev; 86 if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
175 struct pci_bus *child_bus; 87 dev->vendor = PCI_VENDOR_ID_AMD;
176 88 pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
177 list_for_each_entry(dev, &b->devices, bus_list) {
178 int i;
179
180 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
181 struct resource *r = &dev->resource[i];
182
183 if (r->parent || !r->start || !r->flags)
184 continue;
185 pci_claim_resource(dev, i);
186 }
187 } 89 }
188
189 list_for_each_entry(child_bus, &b->children, node)
190 pcibios_claim_one_bus(child_bus);
191} 90}
192#ifdef CONFIG_HOTPLUG 91DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
193EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
194#endif
195
196static void __init pcibios_claim_of_setup(void)
197{
198 struct pci_bus *b;
199
200 if (firmware_has_feature(FW_FEATURE_ISERIES))
201 return;
202 92
203 list_for_each_entry(b, &pci_root_buses, node)
204 pcibios_claim_one_bus(b);
205}
206 93
207static u32 get_int_prop(struct device_node *np, const char *name, u32 def) 94static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
208{ 95{
@@ -270,7 +157,6 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
270 res->end = base + size - 1; 157 res->end = base + size - 1;
271 res->flags = flags; 158 res->flags = flags;
272 res->name = pci_name(dev); 159 res->name = pci_name(dev);
273 fixup_resource(res, dev);
274 } 160 }
275} 161}
276 162
@@ -339,16 +225,17 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
339EXPORT_SYMBOL(of_create_pci_dev); 225EXPORT_SYMBOL(of_create_pci_dev);
340 226
341void __devinit of_scan_bus(struct device_node *node, 227void __devinit of_scan_bus(struct device_node *node,
342 struct pci_bus *bus) 228 struct pci_bus *bus)
343{ 229{
344 struct device_node *child = NULL; 230 struct device_node *child;
345 const u32 *reg; 231 const u32 *reg;
346 int reglen, devfn; 232 int reglen, devfn;
347 struct pci_dev *dev; 233 struct pci_dev *dev;
348 234
349 DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number); 235 DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number);
350 236
351 while ((child = of_get_next_child(node, child)) != NULL) { 237 /* Scan direct children */
238 for_each_child_of_node(node, child) {
352 DBG(" * %s\n", child->full_name); 239 DBG(" * %s\n", child->full_name);
353 reg = of_get_property(child, "reg", &reglen); 240 reg = of_get_property(child, "reg", &reglen);
354 if (reg == NULL || reglen < 20) 241 if (reg == NULL || reglen < 20)
@@ -359,19 +246,26 @@ void __devinit of_scan_bus(struct device_node *node,
359 dev = of_create_pci_dev(child, bus, devfn); 246 dev = of_create_pci_dev(child, bus, devfn);
360 if (!dev) 247 if (!dev)
361 continue; 248 continue;
362 DBG("dev header type: %x\n", dev->hdr_type); 249 DBG(" dev header type: %x\n", dev->hdr_type);
250 }
363 251
252 /* Ally all fixups */
253 pcibios_fixup_of_probed_bus(bus);
254
255 /* Now scan child busses */
256 list_for_each_entry(dev, &bus->devices, bus_list) {
364 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || 257 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
365 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) 258 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
366 of_scan_pci_bridge(child, dev); 259 struct device_node *child = pci_device_to_OF_node(dev);
260 if (dev)
261 of_scan_pci_bridge(child, dev);
262 }
367 } 263 }
368
369 do_bus_setup(bus);
370} 264}
371EXPORT_SYMBOL(of_scan_bus); 265EXPORT_SYMBOL(of_scan_bus);
372 266
373void __devinit of_scan_pci_bridge(struct device_node *node, 267void __devinit of_scan_pci_bridge(struct device_node *node,
374 struct pci_dev *dev) 268 struct pci_dev *dev)
375{ 269{
376 struct pci_bus *bus; 270 struct pci_bus *bus;
377 const u32 *busrange, *ranges; 271 const u32 *busrange, *ranges;
@@ -441,7 +335,6 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
441 res->start = of_read_number(&ranges[1], 2); 335 res->start = of_read_number(&ranges[1], 2);
442 res->end = res->start + size - 1; 336 res->end = res->start + size - 1;
443 res->flags = flags; 337 res->flags = flags;
444 fixup_resource(res, dev);
445 } 338 }
446 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), 339 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
447 bus->number); 340 bus->number);
@@ -462,12 +355,12 @@ EXPORT_SYMBOL(of_scan_pci_bridge);
462void __devinit scan_phb(struct pci_controller *hose) 355void __devinit scan_phb(struct pci_controller *hose)
463{ 356{
464 struct pci_bus *bus; 357 struct pci_bus *bus;
465 struct device_node *node = hose->arch_data; 358 struct device_node *node = hose->dn;
466 int i, mode; 359 int i, mode;
467 struct resource *res;
468 360
469 DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>"); 361 DBG("PCI: Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");
470 362
363 /* Create an empty bus for the toplevel */
471 bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node); 364 bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
472 if (bus == NULL) { 365 if (bus == NULL) {
473 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n", 366 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
@@ -477,27 +370,27 @@ void __devinit scan_phb(struct pci_controller *hose)
477 bus->secondary = hose->first_busno; 370 bus->secondary = hose->first_busno;
478 hose->bus = bus; 371 hose->bus = bus;
479 372
480 if (!firmware_has_feature(FW_FEATURE_ISERIES)) 373 /* Get some IO space for the new PHB */
481 pcibios_map_io_space(bus); 374 pcibios_map_io_space(bus);
482
483 bus->resource[0] = res = &hose->io_resource;
484 if (res->flags && request_resource(&ioport_resource, res)) {
485 printk(KERN_ERR "Failed to request PCI IO region "
486 "on PCI domain %04x\n", hose->global_number);
487 DBG("res->start = 0x%016lx, res->end = 0x%016lx\n",
488 res->start, res->end);
489 }
490 375
376 /* Wire up PHB bus resources */
377 DBG("PCI: PHB IO resource = %016lx-%016lx [%lx]\n",
378 hose->io_resource.start, hose->io_resource.end,
379 hose->io_resource.flags);
380 bus->resource[0] = &hose->io_resource;
491 for (i = 0; i < 3; ++i) { 381 for (i = 0; i < 3; ++i) {
492 res = &hose->mem_resources[i]; 382 DBG("PCI: PHB MEM resource %d = %016lx-%016lx [%lx]\n", i,
493 bus->resource[i+1] = res; 383 hose->mem_resources[i].start,
494 if (res->flags && request_resource(&iomem_resource, res)) 384 hose->mem_resources[i].end,
495 printk(KERN_ERR "Failed to request PCI memory region " 385 hose->mem_resources[i].flags);
496 "on PCI domain %04x\n", hose->global_number); 386 bus->resource[i+1] = &hose->mem_resources[i];
497 } 387 }
388 DBG("PCI: PHB MEM offset = %016lx\n", hose->pci_mem_offset);
389 DBG("PCI: PHB IO offset = %08lx\n",
390 (unsigned long)hose->io_base_virt - _IO_BASE);
498 391
392 /* Get probe mode and perform scan */
499 mode = PCI_PROBE_NORMAL; 393 mode = PCI_PROBE_NORMAL;
500
501 if (node && ppc_md.pci_probe_mode) 394 if (node && ppc_md.pci_probe_mode)
502 mode = ppc_md.pci_probe_mode(bus); 395 mode = ppc_md.pci_probe_mode(bus);
503 DBG(" probe mode: %d\n", mode); 396 DBG(" probe mode: %d\n", mode);
@@ -514,15 +407,15 @@ static int __init pcibios_init(void)
514{ 407{
515 struct pci_controller *hose, *tmp; 408 struct pci_controller *hose, *tmp;
516 409
410 printk(KERN_INFO "PCI: Probing PCI hardware\n");
411
517 /* For now, override phys_mem_access_prot. If we need it, 412 /* For now, override phys_mem_access_prot. If we need it,
518 * later, we may move that initialization to each ppc_md 413 * later, we may move that initialization to each ppc_md
519 */ 414 */
520 ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot; 415 ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
521 416
522 if (firmware_has_feature(FW_FEATURE_ISERIES)) 417 if (pci_probe_only)
523 iSeries_pcibios_init(); 418 ppc_pci_flags |= PPC_PCI_PROBE_ONLY;
524
525 printk(KERN_DEBUG "PCI: Probing PCI hardware\n");
526 419
527 /* Scan all of the recorded PCI controllers. */ 420 /* Scan all of the recorded PCI controllers. */
528 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 421 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
@@ -530,19 +423,8 @@ static int __init pcibios_init(void)
530 pci_bus_add_devices(hose->bus); 423 pci_bus_add_devices(hose->bus);
531 } 424 }
532 425
533 if (!firmware_has_feature(FW_FEATURE_ISERIES)) { 426 /* Call common code to handle resource allocation */
534 if (pci_probe_only) 427 pcibios_resource_survey();
535 pcibios_claim_of_setup();
536 else
537 /* FIXME: `else' will be removed when
538 pci_assign_unassigned_resources() is able to work
539 correctly with [partially] allocated PCI tree. */
540 pci_assign_unassigned_resources();
541 }
542
543 /* Call machine dependent final fixup */
544 if (ppc_md.pcibios_fixup)
545 ppc_md.pcibios_fixup();
546 428
547 printk(KERN_DEBUG "PCI: Probing PCI hardware done\n"); 429 printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
548 430
@@ -551,141 +433,6 @@ static int __init pcibios_init(void)
551 433
552subsys_initcall(pcibios_init); 434subsys_initcall(pcibios_init);
553 435
554int pcibios_enable_device(struct pci_dev *dev, int mask)
555{
556 u16 cmd, oldcmd;
557 int i;
558
559 pci_read_config_word(dev, PCI_COMMAND, &cmd);
560 oldcmd = cmd;
561
562 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
563 struct resource *res = &dev->resource[i];
564
565 /* Only set up the requested stuff */
566 if (!(mask & (1<<i)))
567 continue;
568
569 if (res->flags & IORESOURCE_IO)
570 cmd |= PCI_COMMAND_IO;
571 if (res->flags & IORESOURCE_MEM)
572 cmd |= PCI_COMMAND_MEMORY;
573 }
574
575 if (cmd != oldcmd) {
576 printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
577 pci_name(dev), cmd);
578 /* Enable the appropriate bits in the PCI command register. */
579 pci_write_config_word(dev, PCI_COMMAND, cmd);
580 }
581 return 0;
582}
583
584/* Decide whether to display the domain number in /proc */
585int pci_proc_domain(struct pci_bus *bus)
586{
587 if (firmware_has_feature(FW_FEATURE_ISERIES))
588 return 0;
589 else {
590 struct pci_controller *hose = pci_bus_to_host(bus);
591 return hose->buid != 0;
592 }
593}
594
595void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
596 struct device_node *dev, int prim)
597{
598 const unsigned int *ranges;
599 unsigned int pci_space;
600 unsigned long size;
601 int rlen = 0;
602 int memno = 0;
603 struct resource *res;
604 int np, na = of_n_addr_cells(dev);
605 unsigned long pci_addr, cpu_phys_addr;
606
607 np = na + 5;
608
609 /* From "PCI Binding to 1275"
610 * The ranges property is laid out as an array of elements,
611 * each of which comprises:
612 * cells 0 - 2: a PCI address
613 * cells 3 or 3+4: a CPU physical address
614 * (size depending on dev->n_addr_cells)
615 * cells 4+5 or 5+6: the size of the range
616 */
617 ranges = of_get_property(dev, "ranges", &rlen);
618 if (ranges == NULL)
619 return;
620 hose->io_base_phys = 0;
621 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
622 res = NULL;
623 pci_space = ranges[0];
624 pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
625 cpu_phys_addr = of_translate_address(dev, &ranges[3]);
626 size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
627 ranges += np;
628 if (size == 0)
629 continue;
630
631 /* Now consume following elements while they are contiguous */
632 while (rlen >= np * sizeof(unsigned int)) {
633 unsigned long addr, phys;
634
635 if (ranges[0] != pci_space)
636 break;
637 addr = ((unsigned long)ranges[1] << 32) | ranges[2];
638 phys = ranges[3];
639 if (na >= 2)
640 phys = (phys << 32) | ranges[4];
641 if (addr != pci_addr + size ||
642 phys != cpu_phys_addr + size)
643 break;
644
645 size += ((unsigned long)ranges[na+3] << 32)
646 | ranges[na+4];
647 ranges += np;
648 rlen -= np * sizeof(unsigned int);
649 }
650
651 switch ((pci_space >> 24) & 0x3) {
652 case 1: /* I/O space */
653 hose->io_base_phys = cpu_phys_addr - pci_addr;
654 /* handle from 0 to top of I/O window */
655 hose->pci_io_size = pci_addr + size;
656
657 res = &hose->io_resource;
658 res->flags = IORESOURCE_IO;
659 res->start = pci_addr;
660 DBG("phb%d: IO 0x%lx -> 0x%lx\n", hose->global_number,
661 res->start, res->start + size - 1);
662 break;
663 case 2: /* memory space */
664 memno = 0;
665 while (memno < 3 && hose->mem_resources[memno].flags)
666 ++memno;
667
668 if (memno == 0)
669 hose->pci_mem_offset = cpu_phys_addr - pci_addr;
670 if (memno < 3) {
671 res = &hose->mem_resources[memno];
672 res->flags = IORESOURCE_MEM;
673 res->start = cpu_phys_addr;
674 DBG("phb%d: MEM 0x%lx -> 0x%lx\n", hose->global_number,
675 res->start, res->start + size - 1);
676 }
677 break;
678 }
679 if (res != NULL) {
680 res->name = dev->full_name;
681 res->end = res->start + size - 1;
682 res->parent = NULL;
683 res->sibling = NULL;
684 res->child = NULL;
685 }
686 }
687}
688
689#ifdef CONFIG_HOTPLUG 436#ifdef CONFIG_HOTPLUG
690 437
691int pcibios_unmap_io_space(struct pci_bus *bus) 438int pcibios_unmap_io_space(struct pci_bus *bus)
@@ -719,8 +466,7 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
719 if (hose->io_base_alloc == 0) 466 if (hose->io_base_alloc == 0)
720 return 0; 467 return 0;
721 468
722 DBG("IO unmapping for PHB %s\n", 469 DBG("IO unmapping for PHB %s\n", hose->dn->full_name);
723 ((struct device_node *)hose->arch_data)->full_name);
724 DBG(" alloc=0x%p\n", hose->io_base_alloc); 470 DBG(" alloc=0x%p\n", hose->io_base_alloc);
725 471
726 /* This is a PHB, we fully unmap the IO area */ 472 /* This is a PHB, we fully unmap the IO area */
@@ -779,8 +525,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
779 hose->io_base_virt = (void __iomem *)(area->addr + 525 hose->io_base_virt = (void __iomem *)(area->addr +
780 hose->io_base_phys - phys_page); 526 hose->io_base_phys - phys_page);
781 527
782 DBG("IO mapping for PHB %s\n", 528 DBG("IO mapping for PHB %s\n", hose->dn->full_name);
783 ((struct device_node *)hose->arch_data)->full_name);
784 DBG(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n", 529 DBG(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
785 hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc); 530 hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
786 DBG(" size=0x%016lx (alloc=0x%016lx)\n", 531 DBG(" size=0x%016lx (alloc=0x%016lx)\n",
@@ -803,51 +548,13 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
803} 548}
804EXPORT_SYMBOL_GPL(pcibios_map_io_space); 549EXPORT_SYMBOL_GPL(pcibios_map_io_space);
805 550
806static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
807{
808 struct pci_controller *hose = pci_bus_to_host(dev->bus);
809 unsigned long offset;
810
811 if (res->flags & IORESOURCE_IO) {
812 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
813 res->start += offset;
814 res->end += offset;
815 } else if (res->flags & IORESOURCE_MEM) {
816 res->start += hose->pci_mem_offset;
817 res->end += hose->pci_mem_offset;
818 }
819}
820
821void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
822 struct pci_bus *bus)
823{
824 /* Update device resources. */
825 int i;
826
827 DBG("%s: Fixup resources:\n", pci_name(dev));
828 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
829 struct resource *res = &dev->resource[i];
830 if (!res->flags)
831 continue;
832
833 DBG(" 0x%02x < %08lx:0x%016lx...0x%016lx\n",
834 i, res->flags, res->start, res->end);
835
836 fixup_resource(res, dev);
837
838 DBG(" > %08lx:0x%016lx...0x%016lx\n",
839 res->flags, res->start, res->end);
840 }
841}
842EXPORT_SYMBOL(pcibios_fixup_device_resources);
843
844void __devinit pcibios_setup_new_device(struct pci_dev *dev) 551void __devinit pcibios_setup_new_device(struct pci_dev *dev)
845{ 552{
846 struct dev_archdata *sd = &dev->dev.archdata; 553 struct dev_archdata *sd = &dev->dev.archdata;
847 554
848 sd->of_node = pci_device_to_OF_node(dev); 555 sd->of_node = pci_device_to_OF_node(dev);
849 556
850 DBG("PCI device %s OF node: %s\n", pci_name(dev), 557 DBG("PCI: device %s OF node: %s\n", pci_name(dev),
851 sd->of_node ? sd->of_node->full_name : "<none>"); 558 sd->of_node ? sd->of_node->full_name : "<none>");
852 559
853 sd->dma_ops = pci_dma_ops; 560 sd->dma_ops = pci_dma_ops;
@@ -861,7 +568,7 @@ void __devinit pcibios_setup_new_device(struct pci_dev *dev)
861} 568}
862EXPORT_SYMBOL(pcibios_setup_new_device); 569EXPORT_SYMBOL(pcibios_setup_new_device);
863 570
864static void __devinit do_bus_setup(struct pci_bus *bus) 571void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
865{ 572{
866 struct pci_dev *dev; 573 struct pci_dev *dev;
867 574
@@ -870,42 +577,7 @@ static void __devinit do_bus_setup(struct pci_bus *bus)
870 577
871 list_for_each_entry(dev, &bus->devices, bus_list) 578 list_for_each_entry(dev, &bus->devices, bus_list)
872 pcibios_setup_new_device(dev); 579 pcibios_setup_new_device(dev);
873
874 /* Read default IRQs and fixup if necessary */
875 list_for_each_entry(dev, &bus->devices, bus_list) {
876 pci_read_irq_line(dev);
877 if (ppc_md.pci_irq_fixup)
878 ppc_md.pci_irq_fixup(dev);
879 }
880}
881
882void __devinit pcibios_fixup_bus(struct pci_bus *bus)
883{
884 struct pci_dev *dev = bus->self;
885 struct device_node *np;
886
887 np = pci_bus_to_OF_node(bus);
888
889 DBG("pcibios_fixup_bus(%s)\n", np ? np->full_name : "<???>");
890
891 if (dev && pci_probe_only &&
892 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
893 /* This is a subordinate bridge */
894
895 pci_read_bridge_bases(bus);
896 pcibios_fixup_device_resources(dev, bus);
897 }
898
899 do_bus_setup(bus);
900
901 if (!pci_probe_only)
902 return;
903
904 list_for_each_entry(dev, &bus->devices, bus_list)
905 if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
906 pcibios_fixup_device_resources(dev, bus);
907} 580}
908EXPORT_SYMBOL(pcibios_fixup_bus);
909 581
910unsigned long pci_address_to_pio(phys_addr_t address) 582unsigned long pci_address_to_pio(phys_addr_t address)
911{ 583{
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index b4839038613d..1c67de52e3ce 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -56,11 +56,6 @@ static void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
56 pdn->busno = (regs[0] >> 16) & 0xff; 56 pdn->busno = (regs[0] >> 16) & 0xff;
57 pdn->devfn = (regs[0] >> 8) & 0xff; 57 pdn->devfn = (regs[0] >> 8) & 0xff;
58 } 58 }
59 if (firmware_has_feature(FW_FEATURE_ISERIES)) {
60 const u32 *busp = of_get_property(dn, "linux,subbus", NULL);
61 if (busp)
62 pdn->bussubno = *busp;
63 }
64 59
65 pdn->pci_ext_config_space = (type && *type == 1); 60 pdn->pci_ext_config_space = (type && *type == 1);
66 return NULL; 61 return NULL;
@@ -133,7 +128,7 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre,
133 */ 128 */
134void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb) 129void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb)
135{ 130{
136 struct device_node * dn = (struct device_node *) phb->arch_data; 131 struct device_node *dn = phb->dn;
137 struct pci_dn *pdn; 132 struct pci_dn *pdn;
138 133
139 /* PHB nodes themselves must not match */ 134 /* PHB nodes themselves must not match */
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 13ebeb2d71e6..aa9ff35b0e63 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -59,6 +59,7 @@ extern void single_step_exception(struct pt_regs *regs);
59extern int sys_sigreturn(struct pt_regs *regs); 59extern int sys_sigreturn(struct pt_regs *regs);
60 60
61EXPORT_SYMBOL(clear_pages); 61EXPORT_SYMBOL(clear_pages);
62EXPORT_SYMBOL(copy_page);
62EXPORT_SYMBOL(ISA_DMA_THRESHOLD); 63EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
63EXPORT_SYMBOL(DMA_MODE_READ); 64EXPORT_SYMBOL(DMA_MODE_READ);
64EXPORT_SYMBOL(DMA_MODE_WRITE); 65EXPORT_SYMBOL(DMA_MODE_WRITE);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index acc0d247d3c3..8b5efbce8d90 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -583,6 +583,20 @@ static void __init check_cpu_pa_features(unsigned long node)
583 ibm_pa_features, ARRAY_SIZE(ibm_pa_features)); 583 ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
584} 584}
585 585
586#ifdef CONFIG_PPC64
587static void __init check_cpu_slb_size(unsigned long node)
588{
589 u32 *slb_size_ptr;
590
591 slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
592 if (slb_size_ptr != NULL) {
593 mmu_slb_size = *slb_size_ptr;
594 }
595}
596#else
597#define check_cpu_slb_size(node) do { } while(0)
598#endif
599
586static struct feature_property { 600static struct feature_property {
587 const char *name; 601 const char *name;
588 u32 min_value; 602 u32 min_value;
@@ -600,6 +614,29 @@ static struct feature_property {
600#endif /* CONFIG_PPC64 */ 614#endif /* CONFIG_PPC64 */
601}; 615};
602 616
617#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
618static inline void identical_pvr_fixup(unsigned long node)
619{
620 unsigned int pvr;
621 char *model = of_get_flat_dt_prop(node, "model", NULL);
622
623 /*
624 * Since 440GR(x)/440EP(x) processors have the same pvr,
625 * we check the node path and set bit 28 in the cur_cpu_spec
626 * pvr for EP(x) processor version. This bit is always 0 in
627 * the "real" pvr. Then we call identify_cpu again with
628 * the new logical pvr to enable FPU support.
629 */
630 if (model && strstr(model, "440EP")) {
631 pvr = cur_cpu_spec->pvr_value | 0x8;
632 identify_cpu(0, pvr);
633 DBG("Using logical pvr %x for %s\n", pvr, model);
634 }
635}
636#else
637#define identical_pvr_fixup(node) do { } while(0)
638#endif
639
603static void __init check_cpu_feature_properties(unsigned long node) 640static void __init check_cpu_feature_properties(unsigned long node)
604{ 641{
605 unsigned long i; 642 unsigned long i;
@@ -697,22 +734,13 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
697 prop = of_get_flat_dt_prop(node, "cpu-version", NULL); 734 prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
698 if (prop && (*prop & 0xff000000) == 0x0f000000) 735 if (prop && (*prop & 0xff000000) == 0x0f000000)
699 identify_cpu(0, *prop); 736 identify_cpu(0, *prop);
700#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU) 737
701 /* 738 identical_pvr_fixup(node);
702 * Since 440GR(x)/440EP(x) processors have the same pvr,
703 * we check the node path and set bit 28 in the cur_cpu_spec
704 * pvr for EP(x) processor version. This bit is always 0 in
705 * the "real" pvr. Then we call identify_cpu again with
706 * the new logical pvr to enable FPU support.
707 */
708 if (strstr(uname, "440EP")) {
709 identify_cpu(0, cur_cpu_spec->pvr_value | 0x8);
710 }
711#endif
712 } 739 }
713 740
714 check_cpu_feature_properties(node); 741 check_cpu_feature_properties(node);
715 check_cpu_pa_features(node); 742 check_cpu_pa_features(node);
743 check_cpu_slb_size(node);
716 744
717#ifdef CONFIG_PPC_PSERIES 745#ifdef CONFIG_PPC_PSERIES
718 if (nthreads > 1) 746 if (nthreads > 1)
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 5d89a21dd0d6..5ab4c8466cc9 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2142,82 +2142,34 @@ static void __init fixup_device_tree_pmac(void)
2142#endif 2142#endif
2143 2143
2144#ifdef CONFIG_PPC_EFIKA 2144#ifdef CONFIG_PPC_EFIKA
2145/* The current fw of the Efika has a device tree needs quite a few 2145/*
2146 * fixups to be compliant with the mpc52xx bindings. It's currently 2146 * The MPC5200 FEC driver requires a phy-handle property to tell it how
2147 * unknown if it will ever be compliant (come on bPlan ...) so we do fixups. 2147 * to talk to the phy. If the phy-handle property is missing, then this
2148 * NOTE that we (barely) tolerate it because the EFIKA was out before 2148 * function is called to add the appropriate nodes and link it to the
2149 * the bindings were finished, for any new boards -> RTFM ! */ 2149 * ethernet node.
2150 2150 */
2151struct subst_entry { 2151static void __init fixup_device_tree_efika_add_phy(void)
2152 char *path;
2153 char *property;
2154 void *value;
2155 int value_len;
2156};
2157
2158static void __init fixup_device_tree_efika(void)
2159{ 2152{
2160 /* Substitution table */
2161 #define prop_cstr(x) x, sizeof(x)
2162 int prop_sound_irq[3] = { 2, 2, 0 };
2163 int prop_bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
2164 3,4,0, 3,5,0, 3,6,0, 3,7,0,
2165 3,8,0, 3,9,0, 3,10,0, 3,11,0,
2166 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
2167 struct subst_entry efika_subst_table[] = {
2168 { "/", "device_type", prop_cstr("efika") },
2169 { "/builtin", "device_type", prop_cstr("soc") },
2170 { "/builtin/ata", "compatible", prop_cstr("mpc5200b-ata\0mpc5200-ata"), },
2171 { "/builtin/bestcomm", "compatible", prop_cstr("mpc5200b-bestcomm\0mpc5200-bestcomm") },
2172 { "/builtin/bestcomm", "interrupts", prop_bcomm_irq, sizeof(prop_bcomm_irq) },
2173 { "/builtin/ethernet", "compatible", prop_cstr("mpc5200b-fec\0mpc5200-fec") },
2174 { "/builtin/pic", "compatible", prop_cstr("mpc5200b-pic\0mpc5200-pic") },
2175 { "/builtin/serial", "compatible", prop_cstr("mpc5200b-psc-uart\0mpc5200-psc-uart") },
2176 { "/builtin/sound", "compatible", prop_cstr("mpc5200b-psc-ac97\0mpc5200-psc-ac97") },
2177 { "/builtin/sound", "interrupts", prop_sound_irq, sizeof(prop_sound_irq) },
2178 { "/builtin/sram", "compatible", prop_cstr("mpc5200b-sram\0mpc5200-sram") },
2179 { "/builtin/sram", "device_type", prop_cstr("sram") },
2180 {}
2181 };
2182 #undef prop_cstr
2183
2184 /* Vars */
2185 u32 node; 2153 u32 node;
2186 char prop[64]; 2154 char prop[64];
2187 int rv, i; 2155 int rv;
2188 2156
2189 /* Check if we're really running on a EFIKA */ 2157 /* Check if /builtin/ethernet exists - bail if it doesn't */
2190 node = call_prom("finddevice", 1, 1, ADDR("/")); 2158 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2191 if (!PHANDLE_VALID(node)) 2159 if (!PHANDLE_VALID(node))
2192 return; 2160 return;
2193 2161
2194 rv = prom_getprop(node, "model", prop, sizeof(prop)); 2162 /* Check if the phy-handle property exists - bail if it does */
2195 if (rv == PROM_ERROR) 2163 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2196 return; 2164 if (!rv)
2197 if (strcmp(prop, "EFIKA5K2"))
2198 return; 2165 return;
2199 2166
2200 prom_printf("Applying EFIKA device tree fixups\n"); 2167 /*
2201 2168 * At this point the ethernet device doesn't have a phy described.
2202 /* Process substitution table */ 2169 * Now we need to add the missing phy node and linkage
2203 for (i=0; efika_subst_table[i].path; i++) { 2170 */
2204 struct subst_entry *se = &efika_subst_table[i];
2205
2206 node = call_prom("finddevice", 1, 1, ADDR(se->path));
2207 if (!PHANDLE_VALID(node)) {
2208 prom_printf("fixup_device_tree_efika: ",
2209 "skipped entry %x - not found\n", i);
2210 continue;
2211 }
2212
2213 rv = prom_setprop(node, se->path, se->property,
2214 se->value, se->value_len );
2215 if (rv == PROM_ERROR)
2216 prom_printf("fixup_device_tree_efika: ",
2217 "skipped entry %x - setprop error\n", i);
2218 }
2219 2171
2220 /* Make sure ethernet mdio bus node exists */ 2172 /* Check for an MDIO bus node - if missing then create one */
2221 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio")); 2173 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
2222 if (!PHANDLE_VALID(node)) { 2174 if (!PHANDLE_VALID(node)) {
2223 prom_printf("Adding Ethernet MDIO node\n"); 2175 prom_printf("Adding Ethernet MDIO node\n");
@@ -2226,8 +2178,8 @@ static void __init fixup_device_tree_efika(void)
2226 " new-device" 2178 " new-device"
2227 " 1 encode-int s\" #address-cells\" property" 2179 " 1 encode-int s\" #address-cells\" property"
2228 " 0 encode-int s\" #size-cells\" property" 2180 " 0 encode-int s\" #size-cells\" property"
2229 " s\" mdio\" 2dup device-name device-type" 2181 " s\" mdio\" device-name"
2230 " s\" mpc5200b-fec-phy\" encode-string" 2182 " s\" fsl,mpc5200b-mdio\" encode-string"
2231 " s\" compatible\" property" 2183 " s\" compatible\" property"
2232 " 0xf0003000 0x400 reg" 2184 " 0xf0003000 0x400 reg"
2233 " 0x2 encode-int" 2185 " 0x2 encode-int"
@@ -2237,8 +2189,10 @@ static void __init fixup_device_tree_efika(void)
2237 " finish-device"); 2189 " finish-device");
2238 }; 2190 };
2239 2191
2240 /* Make sure ethernet phy device node exist */ 2192 /* Check for a PHY device node - if missing then create one and
2241 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio/ethernet-phy")); 2193 * give its phandle to the ethernet node */
2194 node = call_prom("finddevice", 1, 1,
2195 ADDR("/builtin/mdio/ethernet-phy"));
2242 if (!PHANDLE_VALID(node)) { 2196 if (!PHANDLE_VALID(node)) {
2243 prom_printf("Adding Ethernet PHY node\n"); 2197 prom_printf("Adding Ethernet PHY node\n");
2244 call_prom("interpret", 1, 1, 2198 call_prom("interpret", 1, 1,
@@ -2254,7 +2208,62 @@ static void __init fixup_device_tree_efika(void)
2254 " s\" phy-handle\" property" 2208 " s\" phy-handle\" property"
2255 " device-end"); 2209 " device-end");
2256 } 2210 }
2211}
2212
2213static void __init fixup_device_tree_efika(void)
2214{
2215 int sound_irq[3] = { 2, 2, 0 };
2216 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
2217 3,4,0, 3,5,0, 3,6,0, 3,7,0,
2218 3,8,0, 3,9,0, 3,10,0, 3,11,0,
2219 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
2220 u32 node;
2221 char prop[64];
2222 int rv, len;
2223
2224 /* Check if we're really running on a EFIKA */
2225 node = call_prom("finddevice", 1, 1, ADDR("/"));
2226 if (!PHANDLE_VALID(node))
2227 return;
2228
2229 rv = prom_getprop(node, "model", prop, sizeof(prop));
2230 if (rv == PROM_ERROR)
2231 return;
2232 if (strcmp(prop, "EFIKA5K2"))
2233 return;
2234
2235 prom_printf("Applying EFIKA device tree fixups\n");
2236
2237 /* Claiming to be 'chrp' is death */
2238 node = call_prom("finddevice", 1, 1, ADDR("/"));
2239 rv = prom_getprop(node, "device_type", prop, sizeof(prop));
2240 if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
2241 prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
2242
2243 /* Fixup bestcomm interrupts property */
2244 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
2245 if (PHANDLE_VALID(node)) {
2246 len = prom_getproplen(node, "interrupts");
2247 if (len == 12) {
2248 prom_printf("Fixing bestcomm interrupts property\n");
2249 prom_setprop(node, "/builtin/bestcom", "interrupts",
2250 bcomm_irq, sizeof(bcomm_irq));
2251 }
2252 }
2253
2254 /* Fixup sound interrupts property */
2255 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
2256 if (PHANDLE_VALID(node)) {
2257 rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
2258 if (rv == PROM_ERROR) {
2259 prom_printf("Adding sound interrupts property\n");
2260 prom_setprop(node, "/builtin/sound", "interrupts",
2261 sound_irq, sizeof(sound_irq));
2262 }
2263 }
2257 2264
2265 /* Make sure ethernet phy-handle property exists */
2266 fixup_device_tree_efika_add_phy();
2258} 2267}
2259#else 2268#else
2260#define fixup_device_tree_efika() 2269#define fixup_device_tree_efika()
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index b5c96af955c6..90eb3a3e383e 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -273,7 +273,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
273#else 273#else
274 struct pci_controller *host; 274 struct pci_controller *host;
275 host = pci_bus_to_host(pdev->bus); 275 host = pci_bus_to_host(pdev->bus);
276 ppnode = host ? host->arch_data : NULL; 276 ppnode = host ? host->dn : NULL;
277#endif 277#endif
278 /* No node for host bridge ? give up */ 278 /* No node for host bridge ? give up */
279 if (ppnode == NULL) 279 if (ppnode == NULL)
@@ -419,7 +419,7 @@ static struct of_bus *of_match_bus(struct device_node *np)
419 419
420static int of_translate_one(struct device_node *parent, struct of_bus *bus, 420static int of_translate_one(struct device_node *parent, struct of_bus *bus,
421 struct of_bus *pbus, u32 *addr, 421 struct of_bus *pbus, u32 *addr,
422 int na, int ns, int pna) 422 int na, int ns, int pna, const char *rprop)
423{ 423{
424 const u32 *ranges; 424 const u32 *ranges;
425 unsigned int rlen; 425 unsigned int rlen;
@@ -438,7 +438,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
438 * to translate addresses that aren't supposed to be translated in 438 * to translate addresses that aren't supposed to be translated in
439 * the first place. --BenH. 439 * the first place. --BenH.
440 */ 440 */
441 ranges = of_get_property(parent, "ranges", &rlen); 441 ranges = of_get_property(parent, rprop, &rlen);
442 if (ranges == NULL || rlen == 0) { 442 if (ranges == NULL || rlen == 0) {
443 offset = of_read_number(addr, na); 443 offset = of_read_number(addr, na);
444 memset(addr, 0, pna * 4); 444 memset(addr, 0, pna * 4);
@@ -481,7 +481,8 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
481 * that can be mapped to a cpu physical address). This is not really specified 481 * that can be mapped to a cpu physical address). This is not really specified
482 * that way, but this is traditionally the way IBM at least do things 482 * that way, but this is traditionally the way IBM at least do things
483 */ 483 */
484u64 of_translate_address(struct device_node *dev, const u32 *in_addr) 484u64 __of_translate_address(struct device_node *dev, const u32 *in_addr,
485 const char *rprop)
485{ 486{
486 struct device_node *parent = NULL; 487 struct device_node *parent = NULL;
487 struct of_bus *bus, *pbus; 488 struct of_bus *bus, *pbus;
@@ -540,7 +541,7 @@ u64 of_translate_address(struct device_node *dev, const u32 *in_addr)
540 pbus->name, pna, pns, parent->full_name); 541 pbus->name, pna, pns, parent->full_name);
541 542
542 /* Apply bus translation */ 543 /* Apply bus translation */
543 if (of_translate_one(dev, bus, pbus, addr, na, ns, pna)) 544 if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
544 break; 545 break;
545 546
546 /* Complete the move up one level */ 547 /* Complete the move up one level */
@@ -556,8 +557,19 @@ u64 of_translate_address(struct device_node *dev, const u32 *in_addr)
556 557
557 return result; 558 return result;
558} 559}
560
561u64 of_translate_address(struct device_node *dev, const u32 *in_addr)
562{
563 return __of_translate_address(dev, in_addr, "ranges");
564}
559EXPORT_SYMBOL(of_translate_address); 565EXPORT_SYMBOL(of_translate_address);
560 566
567u64 of_translate_dma_address(struct device_node *dev, const u32 *in_addr)
568{
569 return __of_translate_address(dev, in_addr, "dma-ranges");
570}
571EXPORT_SYMBOL(of_translate_dma_address);
572
561const u32 *of_get_address(struct device_node *dev, int index, u64 *size, 573const u32 *of_get_address(struct device_node *dev, int index, u64 *size,
562 unsigned int *flags) 574 unsigned int *flags)
563{ 575{
diff --git a/arch/powerpc/kernel/rio.c b/arch/powerpc/kernel/rio.c
new file mode 100644
index 000000000000..29487fedfc76
--- /dev/null
+++ b/arch/powerpc/kernel/rio.c
@@ -0,0 +1,52 @@
1/*
2 * RapidIO PPC32 support
3 *
4 * Copyright 2005 MontaVista Software, Inc.
5 * Matt Porter <mporter@kernel.crashing.org>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/rio.h>
16
17#include <asm/rio.h>
18
19/**
20 * platform_rio_init - Do platform specific RIO init
21 *
22 * Any platform specific initialization of RapidIO
23 * hardware is done here as well as registration
24 * of any active master ports in the system.
25 */
26void __attribute__ ((weak))
27 platform_rio_init(void)
28{
29 printk(KERN_WARNING "RIO: No platform_rio_init() present\n");
30}
31
32/**
33 * ppc_rio_init - Do PPC32 RIO init
34 *
35 * Calls platform-specific RIO init code and then calls
36 * rio_init_mports() to initialize any master ports that
37 * have been registered with the RIO subsystem.
38 */
39static int __init ppc_rio_init(void)
40{
41 printk(KERN_INFO "RIO: RapidIO init\n");
42
43 /* Platform specific initialization */
44 platform_rio_init();
45
46 /* Enumerate all registered ports */
47 rio_init_mports();
48
49 return 0;
50}
51
52subsys_initcall(ppc_rio_init);
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 21f14e57d1f3..433a0a0949fb 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -260,7 +260,7 @@ static int phb_set_bus_ranges(struct device_node *dev,
260 260
261int __devinit rtas_setup_phb(struct pci_controller *phb) 261int __devinit rtas_setup_phb(struct pci_controller *phb)
262{ 262{
263 struct device_node *dev = phb->arch_data; 263 struct device_node *dev = phb->dn;
264 264
265 if (is_python(dev)) 265 if (is_python(dev))
266 python_countermeasures(dev); 266 python_countermeasures(dev);
@@ -280,10 +280,7 @@ void __init find_and_init_phbs(void)
280 struct pci_controller *phb; 280 struct pci_controller *phb;
281 struct device_node *root = of_find_node_by_path("/"); 281 struct device_node *root = of_find_node_by_path("/");
282 282
283 for (node = of_get_next_child(root, NULL); 283 for_each_child_of_node(root, node) {
284 node != NULL;
285 node = of_get_next_child(root, node)) {
286
287 if (node->type == NULL || (strcmp(node->type, "pci") != 0 && 284 if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
288 strcmp(node->type, "pciex") != 0)) 285 strcmp(node->type, "pciex") != 0))
289 continue; 286 continue;
@@ -311,10 +308,12 @@ void __init find_and_init_phbs(void)
311 if (prop) 308 if (prop)
312 pci_probe_only = *prop; 309 pci_probe_only = *prop;
313 310
311#ifdef CONFIG_PPC32 /* Will be made generic soon */
314 prop = of_get_property(of_chosen, 312 prop = of_get_property(of_chosen,
315 "linux,pci-assign-all-buses", NULL); 313 "linux,pci-assign-all-buses", NULL);
316 if (prop) 314 if (prop && *prop)
317 pci_assign_all_buses = *prop; 315 ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
316#endif /* CONFIG_PPC32 */
318 } 317 }
319} 318}
320 319
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 2de00f870edc..6adb5a1e98bb 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -33,6 +33,7 @@
33#include <linux/serial.h> 33#include <linux/serial.h>
34#include <linux/serial_8250.h> 34#include <linux/serial_8250.h>
35#include <linux/debugfs.h> 35#include <linux/debugfs.h>
36#include <linux/percpu.h>
36#include <asm/io.h> 37#include <asm/io.h>
37#include <asm/prom.h> 38#include <asm/prom.h>
38#include <asm/processor.h> 39#include <asm/processor.h>
@@ -57,6 +58,7 @@
57#include <asm/mmu.h> 58#include <asm/mmu.h>
58#include <asm/lmb.h> 59#include <asm/lmb.h>
59#include <asm/xmon.h> 60#include <asm/xmon.h>
61#include <asm/cputhreads.h>
60 62
61#include "setup.h" 63#include "setup.h"
62 64
@@ -327,6 +329,31 @@ void __init check_for_initrd(void)
327 329
328#ifdef CONFIG_SMP 330#ifdef CONFIG_SMP
329 331
332int threads_per_core, threads_shift;
333cpumask_t threads_core_mask;
334
335static void __init cpu_init_thread_core_maps(int tpc)
336{
337 int i;
338
339 threads_per_core = tpc;
340 threads_core_mask = CPU_MASK_NONE;
341
342 /* This implementation only supports power of 2 number of threads
343 * for simplicity and performance
344 */
345 threads_shift = ilog2(tpc);
346 BUG_ON(tpc != (1 << threads_shift));
347
348 for (i = 0; i < tpc; i++)
349 cpu_set(i, threads_core_mask);
350
351 printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
352 tpc, tpc > 1 ? "s" : "");
353 printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift);
354}
355
356
330/** 357/**
331 * setup_cpu_maps - initialize the following cpu maps: 358 * setup_cpu_maps - initialize the following cpu maps:
332 * cpu_possible_map 359 * cpu_possible_map
@@ -350,22 +377,32 @@ void __init smp_setup_cpu_maps(void)
350{ 377{
351 struct device_node *dn = NULL; 378 struct device_node *dn = NULL;
352 int cpu = 0; 379 int cpu = 0;
380 int nthreads = 1;
381
382 DBG("smp_setup_cpu_maps()\n");
353 383
354 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) { 384 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
355 const int *intserv; 385 const int *intserv;
356 int j, len = sizeof(u32), nthreads = 1; 386 int j, len;
387
388 DBG(" * %s...\n", dn->full_name);
357 389
358 intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", 390 intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
359 &len); 391 &len);
360 if (intserv) 392 if (intserv) {
361 nthreads = len / sizeof(int); 393 nthreads = len / sizeof(int);
362 else { 394 DBG(" ibm,ppc-interrupt-server#s -> %d threads\n",
395 nthreads);
396 } else {
397 DBG(" no ibm,ppc-interrupt-server#s -> 1 thread\n");
363 intserv = of_get_property(dn, "reg", NULL); 398 intserv = of_get_property(dn, "reg", NULL);
364 if (!intserv) 399 if (!intserv)
365 intserv = &cpu; /* assume logical == phys */ 400 intserv = &cpu; /* assume logical == phys */
366 } 401 }
367 402
368 for (j = 0; j < nthreads && cpu < NR_CPUS; j++) { 403 for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
404 DBG(" thread %d -> cpu %d (hard id %d)\n",
405 j, cpu, intserv[j]);
369 cpu_set(cpu, cpu_present_map); 406 cpu_set(cpu, cpu_present_map);
370 set_hard_smp_processor_id(cpu, intserv[j]); 407 set_hard_smp_processor_id(cpu, intserv[j]);
371 cpu_set(cpu, cpu_possible_map); 408 cpu_set(cpu, cpu_possible_map);
@@ -373,6 +410,12 @@ void __init smp_setup_cpu_maps(void)
373 } 410 }
374 } 411 }
375 412
413 /* If no SMT supported, nthreads is forced to 1 */
414 if (!cpu_has_feature(CPU_FTR_SMT)) {
415 DBG(" SMT disabled ! nthreads forced to 1\n");
416 nthreads = 1;
417 }
418
376#ifdef CONFIG_PPC64 419#ifdef CONFIG_PPC64
377 /* 420 /*
378 * On pSeries LPAR, we need to know how many cpus 421 * On pSeries LPAR, we need to know how many cpus
@@ -395,7 +438,7 @@ void __init smp_setup_cpu_maps(void)
395 438
396 /* Double maxcpus for processors which have SMT capability */ 439 /* Double maxcpus for processors which have SMT capability */
397 if (cpu_has_feature(CPU_FTR_SMT)) 440 if (cpu_has_feature(CPU_FTR_SMT))
398 maxcpus *= 2; 441 maxcpus *= nthreads;
399 442
400 if (maxcpus > NR_CPUS) { 443 if (maxcpus > NR_CPUS) {
401 printk(KERN_WARNING 444 printk(KERN_WARNING
@@ -412,9 +455,16 @@ void __init smp_setup_cpu_maps(void)
412 out: 455 out:
413 of_node_put(dn); 456 of_node_put(dn);
414 } 457 }
415
416 vdso_data->processorCount = num_present_cpus(); 458 vdso_data->processorCount = num_present_cpus();
417#endif /* CONFIG_PPC64 */ 459#endif /* CONFIG_PPC64 */
460
461 /* Initialize CPU <=> thread mapping
462 *
463 * WARNING: We assume that the number of threads is the same for
464 * every CPU in the system. If that is not the case, then some code
465 * here will have to be reworked
466 */
467 cpu_init_thread_core_maps(nthreads);
418} 468}
419 469
420/* 470/*
@@ -424,17 +474,19 @@ void __init smp_setup_cpu_maps(void)
424 */ 474 */
425void __init smp_setup_cpu_sibling_map(void) 475void __init smp_setup_cpu_sibling_map(void)
426{ 476{
427#if defined(CONFIG_PPC64) 477#ifdef CONFIG_PPC64
428 int cpu; 478 int i, cpu, base;
429 479
430 /*
431 * Do the sibling map; assume only two threads per processor.
432 */
433 for_each_possible_cpu(cpu) { 480 for_each_possible_cpu(cpu) {
434 cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); 481 DBG("Sibling map for CPU %d:", cpu);
435 if (cpu_has_feature(CPU_FTR_SMT)) 482 base = cpu_first_thread_in_core(cpu);
436 cpu_set(cpu ^ 0x1, per_cpu(cpu_sibling_map, cpu)); 483 for (i = 0; i < threads_per_core; i++) {
484 cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
485 DBG(" %d", base + i);
486 }
487 DBG("\n");
437 } 488 }
489
438#endif /* CONFIG_PPC64 */ 490#endif /* CONFIG_PPC64 */
439} 491}
440#endif /* CONFIG_SMP */ 492#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 6126bca8b70a..d840bc772fd3 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -24,13 +24,12 @@
24#include <linux/signal.h> 24#include <linux/signal.h>
25#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/elf.h> 26#include <linux/elf.h>
27#include <linux/ptrace.h>
27#ifdef CONFIG_PPC64 28#ifdef CONFIG_PPC64
28#include <linux/syscalls.h> 29#include <linux/syscalls.h>
29#include <linux/compat.h> 30#include <linux/compat.h>
30#include <linux/ptrace.h>
31#else 31#else
32#include <linux/wait.h> 32#include <linux/wait.h>
33#include <linux/ptrace.h>
34#include <linux/unistd.h> 33#include <linux/unistd.h>
35#include <linux/stddef.h> 34#include <linux/stddef.h>
36#include <linux/tty.h> 35#include <linux/tty.h>
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 338950aeb6f6..be35ffae10f0 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -76,6 +76,8 @@ void smp_call_function_interrupt(void);
76 76
77int smt_enabled_at_boot = 1; 77int smt_enabled_at_boot = 1;
78 78
79static int ipi_fail_ok;
80
79static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; 81static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
80 82
81#ifdef CONFIG_PPC64 83#ifdef CONFIG_PPC64
@@ -181,12 +183,13 @@ static struct call_data_struct {
181 * <wait> If true, wait (atomically) until function has completed on other CPUs. 183 * <wait> If true, wait (atomically) until function has completed on other CPUs.
182 * [RETURNS] 0 on success, else a negative status code. Does not return until 184 * [RETURNS] 0 on success, else a negative status code. Does not return until
183 * remote CPUs are nearly ready to execute <<func>> or are or have executed. 185 * remote CPUs are nearly ready to execute <<func>> or are or have executed.
186 * <map> is a cpu map of the cpus to send IPI to.
184 * 187 *
185 * You must not call this function with disabled interrupts or from a 188 * You must not call this function with disabled interrupts or from a
186 * hardware interrupt handler or from a bottom half handler. 189 * hardware interrupt handler or from a bottom half handler.
187 */ 190 */
188int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic, 191static int __smp_call_function_map(void (*func) (void *info), void *info,
189 int wait, cpumask_t map) 192 int nonatomic, int wait, cpumask_t map)
190{ 193{
191 struct call_data_struct data; 194 struct call_data_struct data;
192 int ret = -1, num_cpus; 195 int ret = -1, num_cpus;
@@ -203,8 +206,6 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
203 if (wait) 206 if (wait)
204 atomic_set(&data.finished, 0); 207 atomic_set(&data.finished, 0);
205 208
206 spin_lock(&call_lock);
207
208 /* remove 'self' from the map */ 209 /* remove 'self' from the map */
209 if (cpu_isset(smp_processor_id(), map)) 210 if (cpu_isset(smp_processor_id(), map))
210 cpu_clear(smp_processor_id(), map); 211 cpu_clear(smp_processor_id(), map);
@@ -231,7 +232,8 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
231 printk("smp_call_function on cpu %d: other cpus not " 232 printk("smp_call_function on cpu %d: other cpus not "
232 "responding (%d)\n", smp_processor_id(), 233 "responding (%d)\n", smp_processor_id(),
233 atomic_read(&data.started)); 234 atomic_read(&data.started));
234 debugger(NULL); 235 if (!ipi_fail_ok)
236 debugger(NULL);
235 goto out; 237 goto out;
236 } 238 }
237 } 239 }
@@ -258,14 +260,18 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
258 out: 260 out:
259 call_data = NULL; 261 call_data = NULL;
260 HMT_medium(); 262 HMT_medium();
261 spin_unlock(&call_lock);
262 return ret; 263 return ret;
263} 264}
264 265
265static int __smp_call_function(void (*func)(void *info), void *info, 266static int __smp_call_function(void (*func)(void *info), void *info,
266 int nonatomic, int wait) 267 int nonatomic, int wait)
267{ 268{
268 return smp_call_function_map(func,info,nonatomic,wait,cpu_online_map); 269 int ret;
270 spin_lock(&call_lock);
271 ret =__smp_call_function_map(func, info, nonatomic, wait,
272 cpu_online_map);
273 spin_unlock(&call_lock);
274 return ret;
269} 275}
270 276
271int smp_call_function(void (*func) (void *info), void *info, int nonatomic, 277int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
@@ -278,8 +284,8 @@ int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
278} 284}
279EXPORT_SYMBOL(smp_call_function); 285EXPORT_SYMBOL(smp_call_function);
280 286
281int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int nonatomic, 287int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
282 int wait) 288 int nonatomic, int wait)
283{ 289{
284 cpumask_t map = CPU_MASK_NONE; 290 cpumask_t map = CPU_MASK_NONE;
285 int ret = 0; 291 int ret = 0;
@@ -291,9 +297,11 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int
291 return -EINVAL; 297 return -EINVAL;
292 298
293 cpu_set(cpu, map); 299 cpu_set(cpu, map);
294 if (cpu != get_cpu()) 300 if (cpu != get_cpu()) {
295 ret = smp_call_function_map(func,info,nonatomic,wait,map); 301 spin_lock(&call_lock);
296 else { 302 ret = __smp_call_function_map(func, info, nonatomic, wait, map);
303 spin_unlock(&call_lock);
304 } else {
297 local_irq_disable(); 305 local_irq_disable();
298 func(info); 306 func(info);
299 local_irq_enable(); 307 local_irq_enable();
@@ -305,7 +313,22 @@ EXPORT_SYMBOL(smp_call_function_single);
305 313
306void smp_send_stop(void) 314void smp_send_stop(void)
307{ 315{
308 __smp_call_function(stop_this_cpu, NULL, 1, 0); 316 int nolock;
317
318 /* It's OK to fail sending the IPI, since the alternative is to
319 * be stuck forever waiting on the other CPU to take the interrupt.
320 *
321 * It's better to at least continue and go through reboot, since this
322 * function is usually called at panic or reboot time in the first
323 * place.
324 */
325 ipi_fail_ok = 1;
326
327 /* Don't deadlock in case we got called through panic */
328 nolock = !spin_trylock(&call_lock);
329 __smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map);
330 if (!nolock)
331 spin_unlock(&call_lock);
309} 332}
310 333
311void smp_call_function_interrupt(void) 334void smp_call_function_interrupt(void)
diff --git a/arch/powerpc/kernel/systbl_chk.c b/arch/powerpc/kernel/systbl_chk.c
new file mode 100644
index 000000000000..238aa63ced8f
--- /dev/null
+++ b/arch/powerpc/kernel/systbl_chk.c
@@ -0,0 +1,58 @@
1/*
2 * This file, when run through CPP produces a list of syscall numbers
3 * in the order of systbl.h. That way we can check for gaps and syscalls
4 * that are out of order.
5 *
6 * Unfortunately, we cannot check for the correct ordering of entries
7 * using SYSX().
8 *
9 * Copyright © IBM Corporation
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16#include <asm/unistd.h>
17
18#define SYSCALL(func) __NR_##func
19#define COMPAT_SYS(func) __NR_##func
20#define PPC_SYS(func) __NR_##func
21#ifdef CONFIG_PPC64
22#define OLDSYS(func) -1
23#define SYS32ONLY(func) -1
24#else
25#define OLDSYS(func) __NR_old##func
26#define SYS32ONLY(func) __NR_##func
27#endif
28#define SYSX(f, f3264, f32) -1
29
30#define SYSCALL_SPU(func) SYSCALL(func)
31#define COMPAT_SYS_SPU(func) COMPAT_SYS(func)
32#define PPC_SYS_SPU(func) PPC_SYS(func)
33#define SYSX_SPU(f, f3264, f32) SYSX(f, f3264, f32)
34
35/* Just insert a marker for ni_syscalls */
36#define __NR_ni_syscall -1
37
38/*
39 * These are the known exceptions.
40 * Hopefully, there will be no more.
41 */
42#define __NR_llseek __NR__llseek
43#undef __NR_umount
44#define __NR_umount __NR_umount2
45#define __NR_old_getrlimit __NR_getrlimit
46#define __NR_newstat __NR_stat
47#define __NR_newlstat __NR_lstat
48#define __NR_newfstat __NR_fstat
49#define __NR_newuname __NR_uname
50#define __NR_sysctl __NR__sysctl
51#define __NR_olddebug_setcontext __NR_sys_debug_setcontext
52
53/* We call sys_ugetrlimit for syscall number __NR_getrlimit */
54#define getrlimit ugetrlimit
55
56START_TABLE
57#include <asm/systbl.h>
58END_TABLE __NR_syscalls
diff --git a/arch/powerpc/kernel/systbl_chk.sh b/arch/powerpc/kernel/systbl_chk.sh
new file mode 100644
index 000000000000..19415e7674a5
--- /dev/null
+++ b/arch/powerpc/kernel/systbl_chk.sh
@@ -0,0 +1,33 @@
1#!/bin/sh
2#
3# Just process the CPP output from systbl_chk.c and complain
4# if anything is out of order.
5#
6# Copyright © 2008 IBM Corporation
7#
8# This program is free software; you can redistribute it and/or
9# modify it under the terms of the GNU General Public License
10# as published by the Free Software Foundation; either version
11# 2 of the License, or (at your option) any later version.
12
13awk 'BEGIN { num = -1; } # Ignore the beginning of the file
14 /^#/ { next; }
15 /^[ \t]*$/ { next; }
16 /^START_TABLE/ { num = 0; next; }
17 /^END_TABLE/ {
18 if (num != $2) {
19 printf "__NR_syscalls (%s) is not one more than the last syscall (%s)\n",
20 $2, num - 1;
21 exit(1);
22 }
23 num = -1; # Ignore the rest of the file
24 }
25 {
26 if (num == -1) next;
27 if (($1 != -1) && ($1 != num)) {
28 printf "Syscall %s out of order (expected %s)\n",
29 $1, num;
30 exit(1);
31 };
32 num++;
33 }' "$1"
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index a925a8eae121..5cd3db5cae41 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -116,9 +116,12 @@ static struct clock_event_device decrementer_clockevent = {
116 .features = CLOCK_EVT_FEAT_ONESHOT, 116 .features = CLOCK_EVT_FEAT_ONESHOT,
117}; 117};
118 118
119static DEFINE_PER_CPU(struct clock_event_device, decrementers); 119struct decrementer_clock {
120void init_decrementer_clockevent(void); 120 struct clock_event_device event;
121static DEFINE_PER_CPU(u64, decrementer_next_tb); 121 u64 next_tb;
122};
123
124static DEFINE_PER_CPU(struct decrementer_clock, decrementers);
122 125
123#ifdef CONFIG_PPC_ISERIES 126#ifdef CONFIG_PPC_ISERIES
124static unsigned long __initdata iSeries_recal_titan; 127static unsigned long __initdata iSeries_recal_titan;
@@ -216,7 +219,11 @@ static u64 read_purr(void)
216 */ 219 */
217static u64 read_spurr(u64 purr) 220static u64 read_spurr(u64 purr)
218{ 221{
219 if (cpu_has_feature(CPU_FTR_SPURR)) 222 /*
223 * cpus without PURR won't have a SPURR
224 * We already know the former when we use this, so tell gcc
225 */
226 if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR))
220 return mfspr(SPRN_SPURR); 227 return mfspr(SPRN_SPURR);
221 return purr; 228 return purr;
222} 229}
@@ -227,29 +234,30 @@ static u64 read_spurr(u64 purr)
227 */ 234 */
228void account_system_vtime(struct task_struct *tsk) 235void account_system_vtime(struct task_struct *tsk)
229{ 236{
230 u64 now, nowscaled, delta, deltascaled; 237 u64 now, nowscaled, delta, deltascaled, sys_time;
231 unsigned long flags; 238 unsigned long flags;
232 239
233 local_irq_save(flags); 240 local_irq_save(flags);
234 now = read_purr(); 241 now = read_purr();
235 delta = now - get_paca()->startpurr;
236 get_paca()->startpurr = now;
237 nowscaled = read_spurr(now); 242 nowscaled = read_spurr(now);
243 delta = now - get_paca()->startpurr;
238 deltascaled = nowscaled - get_paca()->startspurr; 244 deltascaled = nowscaled - get_paca()->startspurr;
245 get_paca()->startpurr = now;
239 get_paca()->startspurr = nowscaled; 246 get_paca()->startspurr = nowscaled;
240 if (!in_interrupt()) { 247 if (!in_interrupt()) {
241 /* deltascaled includes both user and system time. 248 /* deltascaled includes both user and system time.
242 * Hence scale it based on the purr ratio to estimate 249 * Hence scale it based on the purr ratio to estimate
243 * the system time */ 250 * the system time */
251 sys_time = get_paca()->system_time;
244 if (get_paca()->user_time) 252 if (get_paca()->user_time)
245 deltascaled = deltascaled * get_paca()->system_time / 253 deltascaled = deltascaled * sys_time /
246 (get_paca()->system_time + get_paca()->user_time); 254 (sys_time + get_paca()->user_time);
247 delta += get_paca()->system_time; 255 delta += sys_time;
248 get_paca()->system_time = 0; 256 get_paca()->system_time = 0;
249 } 257 }
250 account_system_time(tsk, 0, delta); 258 account_system_time(tsk, 0, delta);
251 get_paca()->purrdelta = delta;
252 account_system_time_scaled(tsk, deltascaled); 259 account_system_time_scaled(tsk, deltascaled);
260 get_paca()->purrdelta = delta;
253 get_paca()->spurrdelta = deltascaled; 261 get_paca()->spurrdelta = deltascaled;
254 local_irq_restore(flags); 262 local_irq_restore(flags);
255} 263}
@@ -326,11 +334,9 @@ void calculate_steal_time(void)
326 s64 stolen; 334 s64 stolen;
327 struct cpu_purr_data *pme; 335 struct cpu_purr_data *pme;
328 336
329 if (!cpu_has_feature(CPU_FTR_PURR)) 337 pme = &__get_cpu_var(cpu_purr_data);
330 return;
331 pme = &per_cpu(cpu_purr_data, smp_processor_id());
332 if (!pme->initialized) 338 if (!pme->initialized)
333 return; /* this can happen in early boot */ 339 return; /* !CPU_FTR_PURR or early in early boot */
334 tb = mftb(); 340 tb = mftb();
335 purr = mfspr(SPRN_PURR); 341 purr = mfspr(SPRN_PURR);
336 stolen = (tb - pme->tb) - (purr - pme->purr); 342 stolen = (tb - pme->tb) - (purr - pme->purr);
@@ -353,7 +359,7 @@ static void snapshot_purr(void)
353 if (!cpu_has_feature(CPU_FTR_PURR)) 359 if (!cpu_has_feature(CPU_FTR_PURR))
354 return; 360 return;
355 local_irq_save(flags); 361 local_irq_save(flags);
356 pme = &per_cpu(cpu_purr_data, smp_processor_id()); 362 pme = &__get_cpu_var(cpu_purr_data);
357 pme->tb = mftb(); 363 pme->tb = mftb();
358 pme->purr = mfspr(SPRN_PURR); 364 pme->purr = mfspr(SPRN_PURR);
359 pme->initialized = 1; 365 pme->initialized = 1;
@@ -556,8 +562,8 @@ void __init iSeries_time_init_early(void)
556void timer_interrupt(struct pt_regs * regs) 562void timer_interrupt(struct pt_regs * regs)
557{ 563{
558 struct pt_regs *old_regs; 564 struct pt_regs *old_regs;
559 int cpu = smp_processor_id(); 565 struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);
560 struct clock_event_device *evt = &per_cpu(decrementers, cpu); 566 struct clock_event_device *evt = &decrementer->event;
561 u64 now; 567 u64 now;
562 568
563 /* Ensure a positive value is written to the decrementer, or else 569 /* Ensure a positive value is written to the decrementer, or else
@@ -570,9 +576,9 @@ void timer_interrupt(struct pt_regs * regs)
570#endif 576#endif
571 577
572 now = get_tb_or_rtc(); 578 now = get_tb_or_rtc();
573 if (now < per_cpu(decrementer_next_tb, cpu)) { 579 if (now < decrementer->next_tb) {
574 /* not time for this event yet */ 580 /* not time for this event yet */
575 now = per_cpu(decrementer_next_tb, cpu) - now; 581 now = decrementer->next_tb - now;
576 if (now <= DECREMENTER_MAX) 582 if (now <= DECREMENTER_MAX)
577 set_dec((int)now); 583 set_dec((int)now);
578 return; 584 return;
@@ -623,6 +629,45 @@ void wakeup_decrementer(void)
623 set_dec(ticks); 629 set_dec(ticks);
624} 630}
625 631
632#ifdef CONFIG_SUSPEND
633void generic_suspend_disable_irqs(void)
634{
635 preempt_disable();
636
637 /* Disable the decrementer, so that it doesn't interfere
638 * with suspending.
639 */
640
641 set_dec(0x7fffffff);
642 local_irq_disable();
643 set_dec(0x7fffffff);
644}
645
646void generic_suspend_enable_irqs(void)
647{
648 wakeup_decrementer();
649
650 local_irq_enable();
651 preempt_enable();
652}
653
654/* Overrides the weak version in kernel/power/main.c */
655void arch_suspend_disable_irqs(void)
656{
657 if (ppc_md.suspend_disable_irqs)
658 ppc_md.suspend_disable_irqs();
659 generic_suspend_disable_irqs();
660}
661
662/* Overrides the weak version in kernel/power/main.c */
663void arch_suspend_enable_irqs(void)
664{
665 generic_suspend_enable_irqs();
666 if (ppc_md.suspend_enable_irqs)
667 ppc_md.suspend_enable_irqs();
668}
669#endif
670
626#ifdef CONFIG_SMP 671#ifdef CONFIG_SMP
627void __init smp_space_timers(unsigned int max_cpus) 672void __init smp_space_timers(unsigned int max_cpus)
628{ 673{
@@ -811,7 +856,7 @@ void __init clocksource_init(void)
811static int decrementer_set_next_event(unsigned long evt, 856static int decrementer_set_next_event(unsigned long evt,
812 struct clock_event_device *dev) 857 struct clock_event_device *dev)
813{ 858{
814 __get_cpu_var(decrementer_next_tb) = get_tb_or_rtc() + evt; 859 __get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt;
815 set_dec(evt); 860 set_dec(evt);
816 return 0; 861 return 0;
817} 862}
@@ -825,7 +870,7 @@ static void decrementer_set_mode(enum clock_event_mode mode,
825 870
826static void register_decrementer_clockevent(int cpu) 871static void register_decrementer_clockevent(int cpu)
827{ 872{
828 struct clock_event_device *dec = &per_cpu(decrementers, cpu); 873 struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;
829 874
830 *dec = decrementer_clockevent; 875 *dec = decrementer_clockevent;
831 dec->cpumask = cpumask_of_cpu(cpu); 876 dec->cpumask = cpumask_of_cpu(cpu);
@@ -836,7 +881,7 @@ static void register_decrementer_clockevent(int cpu)
836 clockevents_register_device(dec); 881 clockevents_register_device(dec);
837} 882}
838 883
839void init_decrementer_clockevent(void) 884static void __init init_decrementer_clockevent(void)
840{ 885{
841 int cpu = smp_processor_id(); 886 int cpu = smp_processor_id();
842 887
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 59c464e26f38..848a20475db8 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -334,18 +334,25 @@ static inline int check_io_access(struct pt_regs *regs)
334#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) 334#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
335#endif 335#endif
336 336
337static int generic_machine_check_exception(struct pt_regs *regs) 337#if defined(CONFIG_4xx)
338int machine_check_4xx(struct pt_regs *regs)
338{ 339{
339 unsigned long reason = get_mc_reason(regs); 340 unsigned long reason = get_mc_reason(regs);
340 341
341#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
342 if (reason & ESR_IMCP) { 342 if (reason & ESR_IMCP) {
343 printk("Instruction"); 343 printk("Instruction");
344 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 344 mtspr(SPRN_ESR, reason & ~ESR_IMCP);
345 } else 345 } else
346 printk("Data"); 346 printk("Data");
347 printk(" machine check in kernel mode.\n"); 347 printk(" machine check in kernel mode.\n");
348#elif defined(CONFIG_440A) 348
349 return 0;
350}
351
352int machine_check_440A(struct pt_regs *regs)
353{
354 unsigned long reason = get_mc_reason(regs);
355
349 printk("Machine check in kernel mode.\n"); 356 printk("Machine check in kernel mode.\n");
350 if (reason & ESR_IMCP){ 357 if (reason & ESR_IMCP){
351 printk("Instruction Synchronous Machine Check exception\n"); 358 printk("Instruction Synchronous Machine Check exception\n");
@@ -375,7 +382,13 @@ static int generic_machine_check_exception(struct pt_regs *regs)
375 /* Clear MCSR */ 382 /* Clear MCSR */
376 mtspr(SPRN_MCSR, mcsr); 383 mtspr(SPRN_MCSR, mcsr);
377 } 384 }
378#elif defined (CONFIG_E500) 385 return 0;
386}
387#elif defined(CONFIG_E500)
388int machine_check_e500(struct pt_regs *regs)
389{
390 unsigned long reason = get_mc_reason(regs);
391
379 printk("Machine check in kernel mode.\n"); 392 printk("Machine check in kernel mode.\n");
380 printk("Caused by (from MCSR=%lx): ", reason); 393 printk("Caused by (from MCSR=%lx): ", reason);
381 394
@@ -403,7 +416,14 @@ static int generic_machine_check_exception(struct pt_regs *regs)
403 printk("Bus - Instruction Parity Error\n"); 416 printk("Bus - Instruction Parity Error\n");
404 if (reason & MCSR_BUS_RPERR) 417 if (reason & MCSR_BUS_RPERR)
405 printk("Bus - Read Parity Error\n"); 418 printk("Bus - Read Parity Error\n");
406#elif defined (CONFIG_E200) 419
420 return 0;
421}
422#elif defined(CONFIG_E200)
423int machine_check_e200(struct pt_regs *regs)
424{
425 unsigned long reason = get_mc_reason(regs);
426
407 printk("Machine check in kernel mode.\n"); 427 printk("Machine check in kernel mode.\n");
408 printk("Caused by (from MCSR=%lx): ", reason); 428 printk("Caused by (from MCSR=%lx): ", reason);
409 429
@@ -421,7 +441,14 @@ static int generic_machine_check_exception(struct pt_regs *regs)
421 printk("Bus - Read Bus Error on data load\n"); 441 printk("Bus - Read Bus Error on data load\n");
422 if (reason & MCSR_BUS_WRERR) 442 if (reason & MCSR_BUS_WRERR)
423 printk("Bus - Write Bus Error on buffered store or cache line push\n"); 443 printk("Bus - Write Bus Error on buffered store or cache line push\n");
424#else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */ 444
445 return 0;
446}
447#else
448int machine_check_generic(struct pt_regs *regs)
449{
450 unsigned long reason = get_mc_reason(regs);
451
425 printk("Machine check in kernel mode.\n"); 452 printk("Machine check in kernel mode.\n");
426 printk("Caused by (from SRR1=%lx): ", reason); 453 printk("Caused by (from SRR1=%lx): ", reason);
427 switch (reason & 0x601F0000) { 454 switch (reason & 0x601F0000) {
@@ -451,22 +478,26 @@ static int generic_machine_check_exception(struct pt_regs *regs)
451 default: 478 default:
452 printk("Unknown values in msr\n"); 479 printk("Unknown values in msr\n");
453 } 480 }
454#endif /* CONFIG_4xx */
455
456 return 0; 481 return 0;
457} 482}
483#endif /* everything else */
458 484
459void machine_check_exception(struct pt_regs *regs) 485void machine_check_exception(struct pt_regs *regs)
460{ 486{
461 int recover = 0; 487 int recover = 0;
462 488
463 /* See if any machine dependent calls */ 489 /* See if any machine dependent calls. In theory, we would want
490 * to call the CPU first, and call the ppc_md. one if the CPU
491 * one returns a positive number. However there is existing code
492 * that assumes the board gets a first chance, so let's keep it
493 * that way for now and fix things later. --BenH.
494 */
464 if (ppc_md.machine_check_exception) 495 if (ppc_md.machine_check_exception)
465 recover = ppc_md.machine_check_exception(regs); 496 recover = ppc_md.machine_check_exception(regs);
466 else 497 else if (cur_cpu_spec->machine_check)
467 recover = generic_machine_check_exception(regs); 498 recover = cur_cpu_spec->machine_check(regs);
468 499
469 if (recover) 500 if (recover > 0)
470 return; 501 return;
471 502
472 if (user_mode(regs)) { 503 if (user_mode(regs)) {
@@ -476,7 +507,12 @@ void machine_check_exception(struct pt_regs *regs)
476 } 507 }
477 508
478#if defined(CONFIG_8xx) && defined(CONFIG_PCI) 509#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
479 /* the qspan pci read routines can cause machine checks -- Cort */ 510 /* the qspan pci read routines can cause machine checks -- Cort
511 *
512 * yuck !!! that totally needs to go away ! There are better ways
513 * to deal with that than having a wart in the mcheck handler.
514 * -- BenH
515 */
480 bad_page_fault(regs, regs->dar, SIGBUS); 516 bad_page_fault(regs, regs->dar, SIGBUS);
481 return; 517 return;
482#endif 518#endif
@@ -622,6 +658,9 @@ static void parse_fpe(struct pt_regs *regs)
622#define INST_POPCNTB 0x7c0000f4 658#define INST_POPCNTB 0x7c0000f4
623#define INST_POPCNTB_MASK 0xfc0007fe 659#define INST_POPCNTB_MASK 0xfc0007fe
624 660
661#define INST_ISEL 0x7c00001e
662#define INST_ISEL_MASK 0xfc00003e
663
625static int emulate_string_inst(struct pt_regs *regs, u32 instword) 664static int emulate_string_inst(struct pt_regs *regs, u32 instword)
626{ 665{
627 u8 rT = (instword >> 21) & 0x1f; 666 u8 rT = (instword >> 21) & 0x1f;
@@ -707,6 +746,23 @@ static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
707 return 0; 746 return 0;
708} 747}
709 748
749static int emulate_isel(struct pt_regs *regs, u32 instword)
750{
751 u8 rT = (instword >> 21) & 0x1f;
752 u8 rA = (instword >> 16) & 0x1f;
753 u8 rB = (instword >> 11) & 0x1f;
754 u8 BC = (instword >> 6) & 0x1f;
755 u8 bit;
756 unsigned long tmp;
757
758 tmp = (rA == 0) ? 0 : regs->gpr[rA];
759 bit = (regs->ccr >> (31 - BC)) & 0x1;
760
761 regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
762
763 return 0;
764}
765
710static int emulate_instruction(struct pt_regs *regs) 766static int emulate_instruction(struct pt_regs *regs)
711{ 767{
712 u32 instword; 768 u32 instword;
@@ -749,6 +805,11 @@ static int emulate_instruction(struct pt_regs *regs)
749 return emulate_popcntb_inst(regs, instword); 805 return emulate_popcntb_inst(regs, instword);
750 } 806 }
751 807
808 /* Emulate isel (Integer Select) instruction */
809 if ((instword & INST_ISEL_MASK) == INST_ISEL) {
810 return emulate_isel(regs, instword);
811 }
812
752 return -EINVAL; 813 return -EINVAL;
753} 814}
754 815
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index d723070c9a33..7aad6203e411 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -54,9 +54,16 @@ void __init udbg_early_init(void)
54#elif defined(CONFIG_PPC_EARLY_DEBUG_44x) 54#elif defined(CONFIG_PPC_EARLY_DEBUG_44x)
55 /* PPC44x debug */ 55 /* PPC44x debug */
56 udbg_init_44x_as1(); 56 udbg_init_44x_as1();
57#elif defined(CONFIG_PPC_EARLY_DEBUG_40x)
58 /* PPC40x debug */
59 udbg_init_40x_realmode();
57#elif defined(CONFIG_PPC_EARLY_DEBUG_CPM) 60#elif defined(CONFIG_PPC_EARLY_DEBUG_CPM)
58 udbg_init_cpm(); 61 udbg_init_cpm();
59#endif 62#endif
63
64#ifdef CONFIG_PPC_EARLY_DEBUG
65 console_loglevel = 10;
66#endif
60} 67}
61 68
62/* udbg library, used by xmon et al */ 69/* udbg library, used by xmon et al */
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 833a3d0bcfa7..cb01ebc59387 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -46,7 +46,7 @@ struct NS16550 {
46 46
47#define LCR_DLAB 0x80 47#define LCR_DLAB 0x80
48 48
49static volatile struct NS16550 __iomem *udbg_comport; 49static struct NS16550 __iomem *udbg_comport;
50 50
51static void udbg_550_putc(char c) 51static void udbg_550_putc(char c)
52{ 52{
@@ -117,7 +117,7 @@ unsigned int udbg_probe_uart_speed(void __iomem *comport, unsigned int clock)
117{ 117{
118 unsigned int dll, dlm, divisor, prescaler, speed; 118 unsigned int dll, dlm, divisor, prescaler, speed;
119 u8 old_lcr; 119 u8 old_lcr;
120 volatile struct NS16550 __iomem *port = comport; 120 struct NS16550 __iomem *port = comport;
121 121
122 old_lcr = in_8(&port->lcr); 122 old_lcr = in_8(&port->lcr);
123 123
@@ -162,7 +162,7 @@ void udbg_maple_real_putc(char c)
162 162
163void __init udbg_init_maple_realmode(void) 163void __init udbg_init_maple_realmode(void)
164{ 164{
165 udbg_comport = (volatile struct NS16550 __iomem *)0xf40003f8; 165 udbg_comport = (struct NS16550 __iomem *)0xf40003f8;
166 166
167 udbg_putc = udbg_maple_real_putc; 167 udbg_putc = udbg_maple_real_putc;
168 udbg_getc = NULL; 168 udbg_getc = NULL;
@@ -184,7 +184,7 @@ void udbg_pas_real_putc(char c)
184 184
185void udbg_init_pas_realmode(void) 185void udbg_init_pas_realmode(void)
186{ 186{
187 udbg_comport = (volatile struct NS16550 __iomem *)0xfcff03f8UL; 187 udbg_comport = (struct NS16550 __iomem *)0xfcff03f8UL;
188 188
189 udbg_putc = udbg_pas_real_putc; 189 udbg_putc = udbg_pas_real_putc;
190 udbg_getc = NULL; 190 udbg_getc = NULL;
@@ -219,9 +219,42 @@ static int udbg_44x_as1_getc(void)
219void __init udbg_init_44x_as1(void) 219void __init udbg_init_44x_as1(void)
220{ 220{
221 udbg_comport = 221 udbg_comport =
222 (volatile struct NS16550 __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR; 222 (struct NS16550 __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR;
223 223
224 udbg_putc = udbg_44x_as1_putc; 224 udbg_putc = udbg_44x_as1_putc;
225 udbg_getc = udbg_44x_as1_getc; 225 udbg_getc = udbg_44x_as1_getc;
226} 226}
227#endif /* CONFIG_PPC_EARLY_DEBUG_44x */ 227#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
228
229#ifdef CONFIG_PPC_EARLY_DEBUG_40x
230static void udbg_40x_real_putc(char c)
231{
232 if (udbg_comport) {
233 while ((real_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
234 /* wait for idle */;
235 real_writeb(c, &udbg_comport->thr); eieio();
236 if (c == '\n')
237 udbg_40x_real_putc('\r');
238 }
239}
240
241static int udbg_40x_real_getc(void)
242{
243 if (udbg_comport) {
244 while ((real_readb(&udbg_comport->lsr) & LSR_DR) == 0)
245 ; /* wait for char */
246 return real_readb(&udbg_comport->rbr);
247 }
248 return -1;
249}
250
251void __init udbg_init_40x_realmode(void)
252{
253 udbg_comport = (struct NS16550 __iomem *)
254 CONFIG_PPC_EARLY_DEBUG_40x_PHYSADDR;
255
256 udbg_putc = udbg_40x_real_putc;
257 udbg_getc = udbg_40x_real_getc;
258 udbg_getc_poll = NULL;
259}
260#endif /* CONFIG_PPC_EARLY_DEBUG_40x */