aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--arch/powerpc/kernel/Makefile3
-rw-r--r--arch/powerpc/kernel/asm-offsets.c2
-rw-r--r--arch/powerpc/kernel/cpu_setup_power4.S4
-rw-r--r--arch/powerpc/kernel/cputable.c224
-rw-r--r--arch/powerpc/kernel/crash.c77
-rw-r--r--arch/powerpc/kernel/entry_32.S2
-rw-r--r--arch/powerpc/kernel/entry_64.S15
-rw-r--r--arch/powerpc/kernel/fpu.S10
-rw-r--r--arch/powerpc/kernel/head_64.S112
-rw-r--r--arch/powerpc/kernel/idle_power4.S8
-rw-r--r--arch/powerpc/kernel/irq.c12
-rw-r--r--arch/powerpc/kernel/lparcfg.c13
-rw-r--r--arch/powerpc/kernel/misc_32.S4
-rw-r--r--arch/powerpc/kernel/misc_64.S10
-rw-r--r--arch/powerpc/kernel/of_device.c4
-rw-r--r--arch/powerpc/kernel/paca.c36
-rw-r--r--arch/powerpc/kernel/pci_32.c1897
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c3
-rw-r--r--arch/powerpc/kernel/prom.c109
-rw-r--r--arch/powerpc/kernel/prom_init.c3
-rw-r--r--arch/powerpc/kernel/prom_parse.c3
-rw-r--r--arch/powerpc/kernel/rtas.c96
-rw-r--r--arch/powerpc/kernel/setup-common.c9
-rw-r--r--arch/powerpc/kernel/time.c2
-rw-r--r--arch/powerpc/kernel/vio.c8
25 files changed, 2392 insertions, 274 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index a94699d8dc52..c287980b7e65 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -60,7 +60,8 @@ obj-$(CONFIG_MODULES) += $(module-y)
60 60
61pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o pci_iommu.o \ 61pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o pci_iommu.o \
62 pci_direct_iommu.o iomap.o 62 pci_direct_iommu.o iomap.o
63obj-$(CONFIG_PCI) += $(pci64-y) 63pci32-$(CONFIG_PPC32) := pci_32.o
64obj-$(CONFIG_PCI) += $(pci64-y) $(pci32-y)
64kexec-$(CONFIG_PPC64) := machine_kexec_64.o crash.o 65kexec-$(CONFIG_PPC64) := machine_kexec_64.o crash.o
65kexec-$(CONFIG_PPC32) := machine_kexec_32.o 66kexec-$(CONFIG_PPC32) := machine_kexec_32.o
66obj-$(CONFIG_KEXEC) += machine_kexec.o $(kexec-y) 67obj-$(CONFIG_KEXEC) += machine_kexec.o $(kexec-y)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 56399c5c931a..840aad43a98b 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -135,7 +135,7 @@ int main(void)
135 DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); 135 DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
136 DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb)); 136 DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
137 DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); 137 DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
138 DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca)); 138 DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
139 DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); 139 DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
140 140
141 DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0)); 141 DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
diff --git a/arch/powerpc/kernel/cpu_setup_power4.S b/arch/powerpc/kernel/cpu_setup_power4.S
index cca942fe6115..b61d86e7ceb6 100644
--- a/arch/powerpc/kernel/cpu_setup_power4.S
+++ b/arch/powerpc/kernel/cpu_setup_power4.S
@@ -130,7 +130,7 @@ _GLOBAL(__save_cpu_setup)
130 mfcr r7 130 mfcr r7
131 131
132 /* Get storage ptr */ 132 /* Get storage ptr */
133 LOADADDR(r5,cpu_state_storage) 133 LOAD_REG_IMMEDIATE(r5,cpu_state_storage)
134 134
135 /* We only deal with 970 for now */ 135 /* We only deal with 970 for now */
136 mfspr r0,SPRN_PVR 136 mfspr r0,SPRN_PVR
@@ -164,7 +164,7 @@ _GLOBAL(__restore_cpu_setup)
164 /* Get storage ptr (FIXME when using anton reloc as we 164 /* Get storage ptr (FIXME when using anton reloc as we
165 * are running with translation disabled here 165 * are running with translation disabled here
166 */ 166 */
167 LOADADDR(r5,cpu_state_storage) 167 LOAD_REG_IMMEDIATE(r5,cpu_state_storage)
168 168
169 /* We only deal with 970 for now */ 169 /* We only deal with 970 for now */
170 mfspr r0,SPRN_PVR 170 mfspr r0,SPRN_PVR
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 43c74a6b07b1..10696456a4c6 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -55,7 +55,8 @@ extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
55#define COMMON_USER_POWER4 (COMMON_USER_PPC64 | PPC_FEATURE_POWER4) 55#define COMMON_USER_POWER4 (COMMON_USER_PPC64 | PPC_FEATURE_POWER4)
56#define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5) 56#define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5)
57#define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS) 57#define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS)
58 58#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
59 PPC_FEATURE_BOOKE)
59 60
60/* We only set the spe features if the kernel was compiled with 61/* We only set the spe features if the kernel was compiled with
61 * spe support 62 * spe support
@@ -79,7 +80,8 @@ struct cpu_spec cpu_specs[] = {
79 .num_pmcs = 8, 80 .num_pmcs = 8,
80 .cpu_setup = __setup_cpu_power3, 81 .cpu_setup = __setup_cpu_power3,
81 .oprofile_cpu_type = "ppc64/power3", 82 .oprofile_cpu_type = "ppc64/power3",
82 .oprofile_type = RS64, 83 .oprofile_type = PPC_OPROFILE_RS64,
84 .platform = "power3",
83 }, 85 },
84 { /* Power3+ */ 86 { /* Power3+ */
85 .pvr_mask = 0xffff0000, 87 .pvr_mask = 0xffff0000,
@@ -92,7 +94,8 @@ struct cpu_spec cpu_specs[] = {
92 .num_pmcs = 8, 94 .num_pmcs = 8,
93 .cpu_setup = __setup_cpu_power3, 95 .cpu_setup = __setup_cpu_power3,
94 .oprofile_cpu_type = "ppc64/power3", 96 .oprofile_cpu_type = "ppc64/power3",
95 .oprofile_type = RS64, 97 .oprofile_type = PPC_OPROFILE_RS64,
98 .platform = "power3",
96 }, 99 },
97 { /* Northstar */ 100 { /* Northstar */
98 .pvr_mask = 0xffff0000, 101 .pvr_mask = 0xffff0000,
@@ -105,7 +108,8 @@ struct cpu_spec cpu_specs[] = {
105 .num_pmcs = 8, 108 .num_pmcs = 8,
106 .cpu_setup = __setup_cpu_power3, 109 .cpu_setup = __setup_cpu_power3,
107 .oprofile_cpu_type = "ppc64/rs64", 110 .oprofile_cpu_type = "ppc64/rs64",
108 .oprofile_type = RS64, 111 .oprofile_type = PPC_OPROFILE_RS64,
112 .platform = "rs64",
109 }, 113 },
110 { /* Pulsar */ 114 { /* Pulsar */
111 .pvr_mask = 0xffff0000, 115 .pvr_mask = 0xffff0000,
@@ -118,7 +122,8 @@ struct cpu_spec cpu_specs[] = {
118 .num_pmcs = 8, 122 .num_pmcs = 8,
119 .cpu_setup = __setup_cpu_power3, 123 .cpu_setup = __setup_cpu_power3,
120 .oprofile_cpu_type = "ppc64/rs64", 124 .oprofile_cpu_type = "ppc64/rs64",
121 .oprofile_type = RS64, 125 .oprofile_type = PPC_OPROFILE_RS64,
126 .platform = "rs64",
122 }, 127 },
123 { /* I-star */ 128 { /* I-star */
124 .pvr_mask = 0xffff0000, 129 .pvr_mask = 0xffff0000,
@@ -131,7 +136,8 @@ struct cpu_spec cpu_specs[] = {
131 .num_pmcs = 8, 136 .num_pmcs = 8,
132 .cpu_setup = __setup_cpu_power3, 137 .cpu_setup = __setup_cpu_power3,
133 .oprofile_cpu_type = "ppc64/rs64", 138 .oprofile_cpu_type = "ppc64/rs64",
134 .oprofile_type = RS64, 139 .oprofile_type = PPC_OPROFILE_RS64,
140 .platform = "rs64",
135 }, 141 },
136 { /* S-star */ 142 { /* S-star */
137 .pvr_mask = 0xffff0000, 143 .pvr_mask = 0xffff0000,
@@ -144,7 +150,8 @@ struct cpu_spec cpu_specs[] = {
144 .num_pmcs = 8, 150 .num_pmcs = 8,
145 .cpu_setup = __setup_cpu_power3, 151 .cpu_setup = __setup_cpu_power3,
146 .oprofile_cpu_type = "ppc64/rs64", 152 .oprofile_cpu_type = "ppc64/rs64",
147 .oprofile_type = RS64, 153 .oprofile_type = PPC_OPROFILE_RS64,
154 .platform = "rs64",
148 }, 155 },
149 { /* Power4 */ 156 { /* Power4 */
150 .pvr_mask = 0xffff0000, 157 .pvr_mask = 0xffff0000,
@@ -157,7 +164,8 @@ struct cpu_spec cpu_specs[] = {
157 .num_pmcs = 8, 164 .num_pmcs = 8,
158 .cpu_setup = __setup_cpu_power4, 165 .cpu_setup = __setup_cpu_power4,
159 .oprofile_cpu_type = "ppc64/power4", 166 .oprofile_cpu_type = "ppc64/power4",
160 .oprofile_type = POWER4, 167 .oprofile_type = PPC_OPROFILE_POWER4,
168 .platform = "power4",
161 }, 169 },
162 { /* Power4+ */ 170 { /* Power4+ */
163 .pvr_mask = 0xffff0000, 171 .pvr_mask = 0xffff0000,
@@ -170,7 +178,8 @@ struct cpu_spec cpu_specs[] = {
170 .num_pmcs = 8, 178 .num_pmcs = 8,
171 .cpu_setup = __setup_cpu_power4, 179 .cpu_setup = __setup_cpu_power4,
172 .oprofile_cpu_type = "ppc64/power4", 180 .oprofile_cpu_type = "ppc64/power4",
173 .oprofile_type = POWER4, 181 .oprofile_type = PPC_OPROFILE_POWER4,
182 .platform = "power4",
174 }, 183 },
175 { /* PPC970 */ 184 { /* PPC970 */
176 .pvr_mask = 0xffff0000, 185 .pvr_mask = 0xffff0000,
@@ -184,7 +193,8 @@ struct cpu_spec cpu_specs[] = {
184 .num_pmcs = 8, 193 .num_pmcs = 8,
185 .cpu_setup = __setup_cpu_ppc970, 194 .cpu_setup = __setup_cpu_ppc970,
186 .oprofile_cpu_type = "ppc64/970", 195 .oprofile_cpu_type = "ppc64/970",
187 .oprofile_type = POWER4, 196 .oprofile_type = PPC_OPROFILE_POWER4,
197 .platform = "ppc970",
188 }, 198 },
189#endif /* CONFIG_PPC64 */ 199#endif /* CONFIG_PPC64 */
190#if defined(CONFIG_PPC64) || defined(CONFIG_POWER4) 200#if defined(CONFIG_PPC64) || defined(CONFIG_POWER4)
@@ -204,7 +214,8 @@ struct cpu_spec cpu_specs[] = {
204 .num_pmcs = 8, 214 .num_pmcs = 8,
205 .cpu_setup = __setup_cpu_ppc970, 215 .cpu_setup = __setup_cpu_ppc970,
206 .oprofile_cpu_type = "ppc64/970", 216 .oprofile_cpu_type = "ppc64/970",
207 .oprofile_type = POWER4, 217 .oprofile_type = PPC_OPROFILE_POWER4,
218 .platform = "ppc970",
208 }, 219 },
209#endif /* defined(CONFIG_PPC64) || defined(CONFIG_POWER4) */ 220#endif /* defined(CONFIG_PPC64) || defined(CONFIG_POWER4) */
210#ifdef CONFIG_PPC64 221#ifdef CONFIG_PPC64
@@ -219,7 +230,8 @@ struct cpu_spec cpu_specs[] = {
219 .dcache_bsize = 128, 230 .dcache_bsize = 128,
220 .cpu_setup = __setup_cpu_ppc970, 231 .cpu_setup = __setup_cpu_ppc970,
221 .oprofile_cpu_type = "ppc64/970", 232 .oprofile_cpu_type = "ppc64/970",
222 .oprofile_type = POWER4, 233 .oprofile_type = PPC_OPROFILE_POWER4,
234 .platform = "ppc970",
223 }, 235 },
224 { /* Power5 GR */ 236 { /* Power5 GR */
225 .pvr_mask = 0xffff0000, 237 .pvr_mask = 0xffff0000,
@@ -232,7 +244,8 @@ struct cpu_spec cpu_specs[] = {
232 .num_pmcs = 6, 244 .num_pmcs = 6,
233 .cpu_setup = __setup_cpu_power4, 245 .cpu_setup = __setup_cpu_power4,
234 .oprofile_cpu_type = "ppc64/power5", 246 .oprofile_cpu_type = "ppc64/power5",
235 .oprofile_type = POWER4, 247 .oprofile_type = PPC_OPROFILE_POWER4,
248 .platform = "power5",
236 }, 249 },
237 { /* Power5 GS */ 250 { /* Power5 GS */
238 .pvr_mask = 0xffff0000, 251 .pvr_mask = 0xffff0000,
@@ -245,7 +258,8 @@ struct cpu_spec cpu_specs[] = {
245 .num_pmcs = 6, 258 .num_pmcs = 6,
246 .cpu_setup = __setup_cpu_power4, 259 .cpu_setup = __setup_cpu_power4,
247 .oprofile_cpu_type = "ppc64/power5+", 260 .oprofile_cpu_type = "ppc64/power5+",
248 .oprofile_type = POWER4, 261 .oprofile_type = PPC_OPROFILE_POWER4,
262 .platform = "power5+",
249 }, 263 },
250 { /* Cell Broadband Engine */ 264 { /* Cell Broadband Engine */
251 .pvr_mask = 0xffff0000, 265 .pvr_mask = 0xffff0000,
@@ -257,6 +271,7 @@ struct cpu_spec cpu_specs[] = {
257 .icache_bsize = 128, 271 .icache_bsize = 128,
258 .dcache_bsize = 128, 272 .dcache_bsize = 128,
259 .cpu_setup = __setup_cpu_be, 273 .cpu_setup = __setup_cpu_be,
274 .platform = "ppc-cell-be",
260 }, 275 },
261 { /* default match */ 276 { /* default match */
262 .pvr_mask = 0x00000000, 277 .pvr_mask = 0x00000000,
@@ -268,6 +283,7 @@ struct cpu_spec cpu_specs[] = {
268 .dcache_bsize = 128, 283 .dcache_bsize = 128,
269 .num_pmcs = 6, 284 .num_pmcs = 6,
270 .cpu_setup = __setup_cpu_power4, 285 .cpu_setup = __setup_cpu_power4,
286 .platform = "power4",
271 } 287 }
272#endif /* CONFIG_PPC64 */ 288#endif /* CONFIG_PPC64 */
273#ifdef CONFIG_PPC32 289#ifdef CONFIG_PPC32
@@ -281,6 +297,7 @@ struct cpu_spec cpu_specs[] = {
281 PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB, 297 PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB,
282 .icache_bsize = 32, 298 .icache_bsize = 32,
283 .dcache_bsize = 32, 299 .dcache_bsize = 32,
300 .platform = "ppc601",
284 }, 301 },
285 { /* 603 */ 302 { /* 603 */
286 .pvr_mask = 0xffff0000, 303 .pvr_mask = 0xffff0000,
@@ -290,7 +307,8 @@ struct cpu_spec cpu_specs[] = {
290 .cpu_user_features = COMMON_USER, 307 .cpu_user_features = COMMON_USER,
291 .icache_bsize = 32, 308 .icache_bsize = 32,
292 .dcache_bsize = 32, 309 .dcache_bsize = 32,
293 .cpu_setup = __setup_cpu_603 310 .cpu_setup = __setup_cpu_603,
311 .platform = "ppc603",
294 }, 312 },
295 { /* 603e */ 313 { /* 603e */
296 .pvr_mask = 0xffff0000, 314 .pvr_mask = 0xffff0000,
@@ -300,7 +318,8 @@ struct cpu_spec cpu_specs[] = {
300 .cpu_user_features = COMMON_USER, 318 .cpu_user_features = COMMON_USER,
301 .icache_bsize = 32, 319 .icache_bsize = 32,
302 .dcache_bsize = 32, 320 .dcache_bsize = 32,
303 .cpu_setup = __setup_cpu_603 321 .cpu_setup = __setup_cpu_603,
322 .platform = "ppc603",
304 }, 323 },
305 { /* 603ev */ 324 { /* 603ev */
306 .pvr_mask = 0xffff0000, 325 .pvr_mask = 0xffff0000,
@@ -310,7 +329,8 @@ struct cpu_spec cpu_specs[] = {
310 .cpu_user_features = COMMON_USER, 329 .cpu_user_features = COMMON_USER,
311 .icache_bsize = 32, 330 .icache_bsize = 32,
312 .dcache_bsize = 32, 331 .dcache_bsize = 32,
313 .cpu_setup = __setup_cpu_603 332 .cpu_setup = __setup_cpu_603,
333 .platform = "ppc603",
314 }, 334 },
315 { /* 604 */ 335 { /* 604 */
316 .pvr_mask = 0xffff0000, 336 .pvr_mask = 0xffff0000,
@@ -321,7 +341,8 @@ struct cpu_spec cpu_specs[] = {
321 .icache_bsize = 32, 341 .icache_bsize = 32,
322 .dcache_bsize = 32, 342 .dcache_bsize = 32,
323 .num_pmcs = 2, 343 .num_pmcs = 2,
324 .cpu_setup = __setup_cpu_604 344 .cpu_setup = __setup_cpu_604,
345 .platform = "ppc604",
325 }, 346 },
326 { /* 604e */ 347 { /* 604e */
327 .pvr_mask = 0xfffff000, 348 .pvr_mask = 0xfffff000,
@@ -332,7 +353,8 @@ struct cpu_spec cpu_specs[] = {
332 .icache_bsize = 32, 353 .icache_bsize = 32,
333 .dcache_bsize = 32, 354 .dcache_bsize = 32,
334 .num_pmcs = 4, 355 .num_pmcs = 4,
335 .cpu_setup = __setup_cpu_604 356 .cpu_setup = __setup_cpu_604,
357 .platform = "ppc604",
336 }, 358 },
337 { /* 604r */ 359 { /* 604r */
338 .pvr_mask = 0xffff0000, 360 .pvr_mask = 0xffff0000,
@@ -343,7 +365,8 @@ struct cpu_spec cpu_specs[] = {
343 .icache_bsize = 32, 365 .icache_bsize = 32,
344 .dcache_bsize = 32, 366 .dcache_bsize = 32,
345 .num_pmcs = 4, 367 .num_pmcs = 4,
346 .cpu_setup = __setup_cpu_604 368 .cpu_setup = __setup_cpu_604,
369 .platform = "ppc604",
347 }, 370 },
348 { /* 604ev */ 371 { /* 604ev */
349 .pvr_mask = 0xffff0000, 372 .pvr_mask = 0xffff0000,
@@ -354,7 +377,8 @@ struct cpu_spec cpu_specs[] = {
354 .icache_bsize = 32, 377 .icache_bsize = 32,
355 .dcache_bsize = 32, 378 .dcache_bsize = 32,
356 .num_pmcs = 4, 379 .num_pmcs = 4,
357 .cpu_setup = __setup_cpu_604 380 .cpu_setup = __setup_cpu_604,
381 .platform = "ppc604",
358 }, 382 },
359 { /* 740/750 (0x4202, don't support TAU ?) */ 383 { /* 740/750 (0x4202, don't support TAU ?) */
360 .pvr_mask = 0xffffffff, 384 .pvr_mask = 0xffffffff,
@@ -365,7 +389,8 @@ struct cpu_spec cpu_specs[] = {
365 .icache_bsize = 32, 389 .icache_bsize = 32,
366 .dcache_bsize = 32, 390 .dcache_bsize = 32,
367 .num_pmcs = 4, 391 .num_pmcs = 4,
368 .cpu_setup = __setup_cpu_750 392 .cpu_setup = __setup_cpu_750,
393 .platform = "ppc750",
369 }, 394 },
370 { /* 750CX (80100 and 8010x?) */ 395 { /* 750CX (80100 and 8010x?) */
371 .pvr_mask = 0xfffffff0, 396 .pvr_mask = 0xfffffff0,
@@ -376,7 +401,8 @@ struct cpu_spec cpu_specs[] = {
376 .icache_bsize = 32, 401 .icache_bsize = 32,
377 .dcache_bsize = 32, 402 .dcache_bsize = 32,
378 .num_pmcs = 4, 403 .num_pmcs = 4,
379 .cpu_setup = __setup_cpu_750cx 404 .cpu_setup = __setup_cpu_750cx,
405 .platform = "ppc750",
380 }, 406 },
381 { /* 750CX (82201 and 82202) */ 407 { /* 750CX (82201 and 82202) */
382 .pvr_mask = 0xfffffff0, 408 .pvr_mask = 0xfffffff0,
@@ -387,7 +413,8 @@ struct cpu_spec cpu_specs[] = {
387 .icache_bsize = 32, 413 .icache_bsize = 32,
388 .dcache_bsize = 32, 414 .dcache_bsize = 32,
389 .num_pmcs = 4, 415 .num_pmcs = 4,
390 .cpu_setup = __setup_cpu_750cx 416 .cpu_setup = __setup_cpu_750cx,
417 .platform = "ppc750",
391 }, 418 },
392 { /* 750CXe (82214) */ 419 { /* 750CXe (82214) */
393 .pvr_mask = 0xfffffff0, 420 .pvr_mask = 0xfffffff0,
@@ -398,7 +425,8 @@ struct cpu_spec cpu_specs[] = {
398 .icache_bsize = 32, 425 .icache_bsize = 32,
399 .dcache_bsize = 32, 426 .dcache_bsize = 32,
400 .num_pmcs = 4, 427 .num_pmcs = 4,
401 .cpu_setup = __setup_cpu_750cx 428 .cpu_setup = __setup_cpu_750cx,
429 .platform = "ppc750",
402 }, 430 },
403 { /* 750CXe "Gekko" (83214) */ 431 { /* 750CXe "Gekko" (83214) */
404 .pvr_mask = 0xffffffff, 432 .pvr_mask = 0xffffffff,
@@ -409,7 +437,8 @@ struct cpu_spec cpu_specs[] = {
409 .icache_bsize = 32, 437 .icache_bsize = 32,
410 .dcache_bsize = 32, 438 .dcache_bsize = 32,
411 .num_pmcs = 4, 439 .num_pmcs = 4,
412 .cpu_setup = __setup_cpu_750cx 440 .cpu_setup = __setup_cpu_750cx,
441 .platform = "ppc750",
413 }, 442 },
414 { /* 745/755 */ 443 { /* 745/755 */
415 .pvr_mask = 0xfffff000, 444 .pvr_mask = 0xfffff000,
@@ -420,7 +449,8 @@ struct cpu_spec cpu_specs[] = {
420 .icache_bsize = 32, 449 .icache_bsize = 32,
421 .dcache_bsize = 32, 450 .dcache_bsize = 32,
422 .num_pmcs = 4, 451 .num_pmcs = 4,
423 .cpu_setup = __setup_cpu_750 452 .cpu_setup = __setup_cpu_750,
453 .platform = "ppc750",
424 }, 454 },
425 { /* 750FX rev 1.x */ 455 { /* 750FX rev 1.x */
426 .pvr_mask = 0xffffff00, 456 .pvr_mask = 0xffffff00,
@@ -431,7 +461,8 @@ struct cpu_spec cpu_specs[] = {
431 .icache_bsize = 32, 461 .icache_bsize = 32,
432 .dcache_bsize = 32, 462 .dcache_bsize = 32,
433 .num_pmcs = 4, 463 .num_pmcs = 4,
434 .cpu_setup = __setup_cpu_750 464 .cpu_setup = __setup_cpu_750,
465 .platform = "ppc750",
435 }, 466 },
436 { /* 750FX rev 2.0 must disable HID0[DPM] */ 467 { /* 750FX rev 2.0 must disable HID0[DPM] */
437 .pvr_mask = 0xffffffff, 468 .pvr_mask = 0xffffffff,
@@ -442,7 +473,8 @@ struct cpu_spec cpu_specs[] = {
442 .icache_bsize = 32, 473 .icache_bsize = 32,
443 .dcache_bsize = 32, 474 .dcache_bsize = 32,
444 .num_pmcs = 4, 475 .num_pmcs = 4,
445 .cpu_setup = __setup_cpu_750 476 .cpu_setup = __setup_cpu_750,
477 .platform = "ppc750",
446 }, 478 },
447 { /* 750FX (All revs except 2.0) */ 479 { /* 750FX (All revs except 2.0) */
448 .pvr_mask = 0xffff0000, 480 .pvr_mask = 0xffff0000,
@@ -453,7 +485,8 @@ struct cpu_spec cpu_specs[] = {
453 .icache_bsize = 32, 485 .icache_bsize = 32,
454 .dcache_bsize = 32, 486 .dcache_bsize = 32,
455 .num_pmcs = 4, 487 .num_pmcs = 4,
456 .cpu_setup = __setup_cpu_750fx 488 .cpu_setup = __setup_cpu_750fx,
489 .platform = "ppc750",
457 }, 490 },
458 { /* 750GX */ 491 { /* 750GX */
459 .pvr_mask = 0xffff0000, 492 .pvr_mask = 0xffff0000,
@@ -464,7 +497,8 @@ struct cpu_spec cpu_specs[] = {
464 .icache_bsize = 32, 497 .icache_bsize = 32,
465 .dcache_bsize = 32, 498 .dcache_bsize = 32,
466 .num_pmcs = 4, 499 .num_pmcs = 4,
467 .cpu_setup = __setup_cpu_750fx 500 .cpu_setup = __setup_cpu_750fx,
501 .platform = "ppc750",
468 }, 502 },
469 { /* 740/750 (L2CR bit need fixup for 740) */ 503 { /* 740/750 (L2CR bit need fixup for 740) */
470 .pvr_mask = 0xffff0000, 504 .pvr_mask = 0xffff0000,
@@ -475,7 +509,8 @@ struct cpu_spec cpu_specs[] = {
475 .icache_bsize = 32, 509 .icache_bsize = 32,
476 .dcache_bsize = 32, 510 .dcache_bsize = 32,
477 .num_pmcs = 4, 511 .num_pmcs = 4,
478 .cpu_setup = __setup_cpu_750 512 .cpu_setup = __setup_cpu_750,
513 .platform = "ppc750",
479 }, 514 },
480 { /* 7400 rev 1.1 ? (no TAU) */ 515 { /* 7400 rev 1.1 ? (no TAU) */
481 .pvr_mask = 0xffffffff, 516 .pvr_mask = 0xffffffff,
@@ -486,7 +521,8 @@ struct cpu_spec cpu_specs[] = {
486 .icache_bsize = 32, 521 .icache_bsize = 32,
487 .dcache_bsize = 32, 522 .dcache_bsize = 32,
488 .num_pmcs = 4, 523 .num_pmcs = 4,
489 .cpu_setup = __setup_cpu_7400 524 .cpu_setup = __setup_cpu_7400,
525 .platform = "ppc7400",
490 }, 526 },
491 { /* 7400 */ 527 { /* 7400 */
492 .pvr_mask = 0xffff0000, 528 .pvr_mask = 0xffff0000,
@@ -497,7 +533,8 @@ struct cpu_spec cpu_specs[] = {
497 .icache_bsize = 32, 533 .icache_bsize = 32,
498 .dcache_bsize = 32, 534 .dcache_bsize = 32,
499 .num_pmcs = 4, 535 .num_pmcs = 4,
500 .cpu_setup = __setup_cpu_7400 536 .cpu_setup = __setup_cpu_7400,
537 .platform = "ppc7400",
501 }, 538 },
502 { /* 7410 */ 539 { /* 7410 */
503 .pvr_mask = 0xffff0000, 540 .pvr_mask = 0xffff0000,
@@ -508,7 +545,8 @@ struct cpu_spec cpu_specs[] = {
508 .icache_bsize = 32, 545 .icache_bsize = 32,
509 .dcache_bsize = 32, 546 .dcache_bsize = 32,
510 .num_pmcs = 4, 547 .num_pmcs = 4,
511 .cpu_setup = __setup_cpu_7410 548 .cpu_setup = __setup_cpu_7410,
549 .platform = "ppc7400",
512 }, 550 },
513 { /* 7450 2.0 - no doze/nap */ 551 { /* 7450 2.0 - no doze/nap */
514 .pvr_mask = 0xffffffff, 552 .pvr_mask = 0xffffffff,
@@ -521,7 +559,8 @@ struct cpu_spec cpu_specs[] = {
521 .num_pmcs = 6, 559 .num_pmcs = 6,
522 .cpu_setup = __setup_cpu_745x, 560 .cpu_setup = __setup_cpu_745x,
523 .oprofile_cpu_type = "ppc/7450", 561 .oprofile_cpu_type = "ppc/7450",
524 .oprofile_type = G4, 562 .oprofile_type = PPC_OPROFILE_G4,
563 .platform = "ppc7450",
525 }, 564 },
526 { /* 7450 2.1 */ 565 { /* 7450 2.1 */
527 .pvr_mask = 0xffffffff, 566 .pvr_mask = 0xffffffff,
@@ -534,7 +573,8 @@ struct cpu_spec cpu_specs[] = {
534 .num_pmcs = 6, 573 .num_pmcs = 6,
535 .cpu_setup = __setup_cpu_745x, 574 .cpu_setup = __setup_cpu_745x,
536 .oprofile_cpu_type = "ppc/7450", 575 .oprofile_cpu_type = "ppc/7450",
537 .oprofile_type = G4, 576 .oprofile_type = PPC_OPROFILE_G4,
577 .platform = "ppc7450",
538 }, 578 },
539 { /* 7450 2.3 and newer */ 579 { /* 7450 2.3 and newer */
540 .pvr_mask = 0xffff0000, 580 .pvr_mask = 0xffff0000,
@@ -547,7 +587,8 @@ struct cpu_spec cpu_specs[] = {
547 .num_pmcs = 6, 587 .num_pmcs = 6,
548 .cpu_setup = __setup_cpu_745x, 588 .cpu_setup = __setup_cpu_745x,
549 .oprofile_cpu_type = "ppc/7450", 589 .oprofile_cpu_type = "ppc/7450",
550 .oprofile_type = G4, 590 .oprofile_type = PPC_OPROFILE_G4,
591 .platform = "ppc7450",
551 }, 592 },
552 { /* 7455 rev 1.x */ 593 { /* 7455 rev 1.x */
553 .pvr_mask = 0xffffff00, 594 .pvr_mask = 0xffffff00,
@@ -560,7 +601,8 @@ struct cpu_spec cpu_specs[] = {
560 .num_pmcs = 6, 601 .num_pmcs = 6,
561 .cpu_setup = __setup_cpu_745x, 602 .cpu_setup = __setup_cpu_745x,
562 .oprofile_cpu_type = "ppc/7450", 603 .oprofile_cpu_type = "ppc/7450",
563 .oprofile_type = G4, 604 .oprofile_type = PPC_OPROFILE_G4,
605 .platform = "ppc7450",
564 }, 606 },
565 { /* 7455 rev 2.0 */ 607 { /* 7455 rev 2.0 */
566 .pvr_mask = 0xffffffff, 608 .pvr_mask = 0xffffffff,
@@ -573,7 +615,8 @@ struct cpu_spec cpu_specs[] = {
573 .num_pmcs = 6, 615 .num_pmcs = 6,
574 .cpu_setup = __setup_cpu_745x, 616 .cpu_setup = __setup_cpu_745x,
575 .oprofile_cpu_type = "ppc/7450", 617 .oprofile_cpu_type = "ppc/7450",
576 .oprofile_type = G4, 618 .oprofile_type = PPC_OPROFILE_G4,
619 .platform = "ppc7450",
577 }, 620 },
578 { /* 7455 others */ 621 { /* 7455 others */
579 .pvr_mask = 0xffff0000, 622 .pvr_mask = 0xffff0000,
@@ -586,7 +629,8 @@ struct cpu_spec cpu_specs[] = {
586 .num_pmcs = 6, 629 .num_pmcs = 6,
587 .cpu_setup = __setup_cpu_745x, 630 .cpu_setup = __setup_cpu_745x,
588 .oprofile_cpu_type = "ppc/7450", 631 .oprofile_cpu_type = "ppc/7450",
589 .oprofile_type = G4, 632 .oprofile_type = PPC_OPROFILE_G4,
633 .platform = "ppc7450",
590 }, 634 },
591 { /* 7447/7457 Rev 1.0 */ 635 { /* 7447/7457 Rev 1.0 */
592 .pvr_mask = 0xffffffff, 636 .pvr_mask = 0xffffffff,
@@ -599,7 +643,8 @@ struct cpu_spec cpu_specs[] = {
599 .num_pmcs = 6, 643 .num_pmcs = 6,
600 .cpu_setup = __setup_cpu_745x, 644 .cpu_setup = __setup_cpu_745x,
601 .oprofile_cpu_type = "ppc/7450", 645 .oprofile_cpu_type = "ppc/7450",
602 .oprofile_type = G4, 646 .oprofile_type = PPC_OPROFILE_G4,
647 .platform = "ppc7450",
603 }, 648 },
604 { /* 7447/7457 Rev 1.1 */ 649 { /* 7447/7457 Rev 1.1 */
605 .pvr_mask = 0xffffffff, 650 .pvr_mask = 0xffffffff,
@@ -612,7 +657,8 @@ struct cpu_spec cpu_specs[] = {
612 .num_pmcs = 6, 657 .num_pmcs = 6,
613 .cpu_setup = __setup_cpu_745x, 658 .cpu_setup = __setup_cpu_745x,
614 .oprofile_cpu_type = "ppc/7450", 659 .oprofile_cpu_type = "ppc/7450",
615 .oprofile_type = G4, 660 .oprofile_type = PPC_OPROFILE_G4,
661 .platform = "ppc7450",
616 }, 662 },
617 { /* 7447/7457 Rev 1.2 and later */ 663 { /* 7447/7457 Rev 1.2 and later */
618 .pvr_mask = 0xffff0000, 664 .pvr_mask = 0xffff0000,
@@ -625,7 +671,8 @@ struct cpu_spec cpu_specs[] = {
625 .num_pmcs = 6, 671 .num_pmcs = 6,
626 .cpu_setup = __setup_cpu_745x, 672 .cpu_setup = __setup_cpu_745x,
627 .oprofile_cpu_type = "ppc/7450", 673 .oprofile_cpu_type = "ppc/7450",
628 .oprofile_type = G4, 674 .oprofile_type = PPC_OPROFILE_G4,
675 .platform = "ppc7450",
629 }, 676 },
630 { /* 7447A */ 677 { /* 7447A */
631 .pvr_mask = 0xffff0000, 678 .pvr_mask = 0xffff0000,
@@ -638,7 +685,8 @@ struct cpu_spec cpu_specs[] = {
638 .num_pmcs = 6, 685 .num_pmcs = 6,
639 .cpu_setup = __setup_cpu_745x, 686 .cpu_setup = __setup_cpu_745x,
640 .oprofile_cpu_type = "ppc/7450", 687 .oprofile_cpu_type = "ppc/7450",
641 .oprofile_type = G4, 688 .oprofile_type = PPC_OPROFILE_G4,
689 .platform = "ppc7450",
642 }, 690 },
643 { /* 7448 */ 691 { /* 7448 */
644 .pvr_mask = 0xffff0000, 692 .pvr_mask = 0xffff0000,
@@ -651,7 +699,8 @@ struct cpu_spec cpu_specs[] = {
651 .num_pmcs = 6, 699 .num_pmcs = 6,
652 .cpu_setup = __setup_cpu_745x, 700 .cpu_setup = __setup_cpu_745x,
653 .oprofile_cpu_type = "ppc/7450", 701 .oprofile_cpu_type = "ppc/7450",
654 .oprofile_type = G4, 702 .oprofile_type = PPC_OPROFILE_G4,
703 .platform = "ppc7450",
655 }, 704 },
656 { /* 82xx (8240, 8245, 8260 are all 603e cores) */ 705 { /* 82xx (8240, 8245, 8260 are all 603e cores) */
657 .pvr_mask = 0x7fff0000, 706 .pvr_mask = 0x7fff0000,
@@ -661,7 +710,8 @@ struct cpu_spec cpu_specs[] = {
661 .cpu_user_features = COMMON_USER, 710 .cpu_user_features = COMMON_USER,
662 .icache_bsize = 32, 711 .icache_bsize = 32,
663 .dcache_bsize = 32, 712 .dcache_bsize = 32,
664 .cpu_setup = __setup_cpu_603 713 .cpu_setup = __setup_cpu_603,
714 .platform = "ppc603",
665 }, 715 },
666 { /* All G2_LE (603e core, plus some) have the same pvr */ 716 { /* All G2_LE (603e core, plus some) have the same pvr */
667 .pvr_mask = 0x7fff0000, 717 .pvr_mask = 0x7fff0000,
@@ -671,7 +721,8 @@ struct cpu_spec cpu_specs[] = {
671 .cpu_user_features = COMMON_USER, 721 .cpu_user_features = COMMON_USER,
672 .icache_bsize = 32, 722 .icache_bsize = 32,
673 .dcache_bsize = 32, 723 .dcache_bsize = 32,
674 .cpu_setup = __setup_cpu_603 724 .cpu_setup = __setup_cpu_603,
725 .platform = "ppc603",
675 }, 726 },
676 { /* e300 (a 603e core, plus some) on 83xx */ 727 { /* e300 (a 603e core, plus some) on 83xx */
677 .pvr_mask = 0x7fff0000, 728 .pvr_mask = 0x7fff0000,
@@ -681,7 +732,8 @@ struct cpu_spec cpu_specs[] = {
681 .cpu_user_features = COMMON_USER, 732 .cpu_user_features = COMMON_USER,
682 .icache_bsize = 32, 733 .icache_bsize = 32,
683 .dcache_bsize = 32, 734 .dcache_bsize = 32,
684 .cpu_setup = __setup_cpu_603 735 .cpu_setup = __setup_cpu_603,
736 .platform = "ppc603",
685 }, 737 },
686 { /* default match, we assume split I/D cache & TB (non-601)... */ 738 { /* default match, we assume split I/D cache & TB (non-601)... */
687 .pvr_mask = 0x00000000, 739 .pvr_mask = 0x00000000,
@@ -691,6 +743,7 @@ struct cpu_spec cpu_specs[] = {
691 .cpu_user_features = COMMON_USER, 743 .cpu_user_features = COMMON_USER,
692 .icache_bsize = 32, 744 .icache_bsize = 32,
693 .dcache_bsize = 32, 745 .dcache_bsize = 32,
746 .platform = "ppc603",
694 }, 747 },
695#endif /* CLASSIC_PPC */ 748#endif /* CLASSIC_PPC */
696#ifdef CONFIG_8xx 749#ifdef CONFIG_8xx
@@ -704,6 +757,7 @@ struct cpu_spec cpu_specs[] = {
704 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 757 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
705 .icache_bsize = 16, 758 .icache_bsize = 16,
706 .dcache_bsize = 16, 759 .dcache_bsize = 16,
760 .platform = "ppc823",
707 }, 761 },
708#endif /* CONFIG_8xx */ 762#endif /* CONFIG_8xx */
709#ifdef CONFIG_40x 763#ifdef CONFIG_40x
@@ -715,6 +769,7 @@ struct cpu_spec cpu_specs[] = {
715 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 769 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
716 .icache_bsize = 16, 770 .icache_bsize = 16,
717 .dcache_bsize = 16, 771 .dcache_bsize = 16,
772 .platform = "ppc403",
718 }, 773 },
719 { /* 403GCX */ 774 { /* 403GCX */
720 .pvr_mask = 0xffffff00, 775 .pvr_mask = 0xffffff00,
@@ -725,6 +780,7 @@ struct cpu_spec cpu_specs[] = {
725 PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB, 780 PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
726 .icache_bsize = 16, 781 .icache_bsize = 16,
727 .dcache_bsize = 16, 782 .dcache_bsize = 16,
783 .platform = "ppc403",
728 }, 784 },
729 { /* 403G ?? */ 785 { /* 403G ?? */
730 .pvr_mask = 0xffff0000, 786 .pvr_mask = 0xffff0000,
@@ -734,6 +790,7 @@ struct cpu_spec cpu_specs[] = {
734 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 790 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
735 .icache_bsize = 16, 791 .icache_bsize = 16,
736 .dcache_bsize = 16, 792 .dcache_bsize = 16,
793 .platform = "ppc403",
737 }, 794 },
738 { /* 405GP */ 795 { /* 405GP */
739 .pvr_mask = 0xffff0000, 796 .pvr_mask = 0xffff0000,
@@ -744,6 +801,7 @@ struct cpu_spec cpu_specs[] = {
744 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 801 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
745 .icache_bsize = 32, 802 .icache_bsize = 32,
746 .dcache_bsize = 32, 803 .dcache_bsize = 32,
804 .platform = "ppc405",
747 }, 805 },
748 { /* STB 03xxx */ 806 { /* STB 03xxx */
749 .pvr_mask = 0xffff0000, 807 .pvr_mask = 0xffff0000,
@@ -754,6 +812,7 @@ struct cpu_spec cpu_specs[] = {
754 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 812 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
755 .icache_bsize = 32, 813 .icache_bsize = 32,
756 .dcache_bsize = 32, 814 .dcache_bsize = 32,
815 .platform = "ppc405",
757 }, 816 },
758 { /* STB 04xxx */ 817 { /* STB 04xxx */
759 .pvr_mask = 0xffff0000, 818 .pvr_mask = 0xffff0000,
@@ -764,6 +823,7 @@ struct cpu_spec cpu_specs[] = {
764 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 823 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
765 .icache_bsize = 32, 824 .icache_bsize = 32,
766 .dcache_bsize = 32, 825 .dcache_bsize = 32,
826 .platform = "ppc405",
767 }, 827 },
768 { /* NP405L */ 828 { /* NP405L */
769 .pvr_mask = 0xffff0000, 829 .pvr_mask = 0xffff0000,
@@ -774,6 +834,7 @@ struct cpu_spec cpu_specs[] = {
774 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 834 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
775 .icache_bsize = 32, 835 .icache_bsize = 32,
776 .dcache_bsize = 32, 836 .dcache_bsize = 32,
837 .platform = "ppc405",
777 }, 838 },
778 { /* NP4GS3 */ 839 { /* NP4GS3 */
779 .pvr_mask = 0xffff0000, 840 .pvr_mask = 0xffff0000,
@@ -784,6 +845,7 @@ struct cpu_spec cpu_specs[] = {
784 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 845 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
785 .icache_bsize = 32, 846 .icache_bsize = 32,
786 .dcache_bsize = 32, 847 .dcache_bsize = 32,
848 .platform = "ppc405",
787 }, 849 },
788 { /* NP405H */ 850 { /* NP405H */
789 .pvr_mask = 0xffff0000, 851 .pvr_mask = 0xffff0000,
@@ -794,6 +856,7 @@ struct cpu_spec cpu_specs[] = {
794 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 856 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
795 .icache_bsize = 32, 857 .icache_bsize = 32,
796 .dcache_bsize = 32, 858 .dcache_bsize = 32,
859 .platform = "ppc405",
797 }, 860 },
798 { /* 405GPr */ 861 { /* 405GPr */
799 .pvr_mask = 0xffff0000, 862 .pvr_mask = 0xffff0000,
@@ -804,6 +867,7 @@ struct cpu_spec cpu_specs[] = {
804 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 867 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
805 .icache_bsize = 32, 868 .icache_bsize = 32,
806 .dcache_bsize = 32, 869 .dcache_bsize = 32,
870 .platform = "ppc405",
807 }, 871 },
808 { /* STBx25xx */ 872 { /* STBx25xx */
809 .pvr_mask = 0xffff0000, 873 .pvr_mask = 0xffff0000,
@@ -814,6 +878,7 @@ struct cpu_spec cpu_specs[] = {
814 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 878 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
815 .icache_bsize = 32, 879 .icache_bsize = 32,
816 .dcache_bsize = 32, 880 .dcache_bsize = 32,
881 .platform = "ppc405",
817 }, 882 },
818 { /* 405LP */ 883 { /* 405LP */
819 .pvr_mask = 0xffff0000, 884 .pvr_mask = 0xffff0000,
@@ -823,6 +888,7 @@ struct cpu_spec cpu_specs[] = {
823 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 888 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
824 .icache_bsize = 32, 889 .icache_bsize = 32,
825 .dcache_bsize = 32, 890 .dcache_bsize = 32,
891 .platform = "ppc405",
826 }, 892 },
827 { /* Xilinx Virtex-II Pro */ 893 { /* Xilinx Virtex-II Pro */
828 .pvr_mask = 0xffff0000, 894 .pvr_mask = 0xffff0000,
@@ -833,6 +899,7 @@ struct cpu_spec cpu_specs[] = {
833 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 899 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
834 .icache_bsize = 32, 900 .icache_bsize = 32,
835 .dcache_bsize = 32, 901 .dcache_bsize = 32,
902 .platform = "ppc405",
836 }, 903 },
837 { /* 405EP */ 904 { /* 405EP */
838 .pvr_mask = 0xffff0000, 905 .pvr_mask = 0xffff0000,
@@ -843,6 +910,7 @@ struct cpu_spec cpu_specs[] = {
843 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 910 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
844 .icache_bsize = 32, 911 .icache_bsize = 32,
845 .dcache_bsize = 32, 912 .dcache_bsize = 32,
913 .platform = "ppc405",
846 }, 914 },
847 915
848#endif /* CONFIG_40x */ 916#endif /* CONFIG_40x */
@@ -852,81 +920,90 @@ struct cpu_spec cpu_specs[] = {
852 .pvr_value = 0x40000850, 920 .pvr_value = 0x40000850,
853 .cpu_name = "440EP Rev. A", 921 .cpu_name = "440EP Rev. A",
854 .cpu_features = CPU_FTRS_44X, 922 .cpu_features = CPU_FTRS_44X,
855 .cpu_user_features = COMMON_USER, /* 440EP has an FPU */ 923 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
856 .icache_bsize = 32, 924 .icache_bsize = 32,
857 .dcache_bsize = 32, 925 .dcache_bsize = 32,
926 .platform = "ppc440",
858 }, 927 },
859 { 928 {
860 .pvr_mask = 0xf0000fff, 929 .pvr_mask = 0xf0000fff,
861 .pvr_value = 0x400008d3, 930 .pvr_value = 0x400008d3,
862 .cpu_name = "440EP Rev. B", 931 .cpu_name = "440EP Rev. B",
863 .cpu_features = CPU_FTRS_44X, 932 .cpu_features = CPU_FTRS_44X,
864 .cpu_user_features = COMMON_USER, /* 440EP has an FPU */ 933 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
865 .icache_bsize = 32, 934 .icache_bsize = 32,
866 .dcache_bsize = 32, 935 .dcache_bsize = 32,
936 .platform = "ppc440",
867 }, 937 },
868 { /* 440GP Rev. B */ 938 { /* 440GP Rev. B */
869 .pvr_mask = 0xf0000fff, 939 .pvr_mask = 0xf0000fff,
870 .pvr_value = 0x40000440, 940 .pvr_value = 0x40000440,
871 .cpu_name = "440GP Rev. B", 941 .cpu_name = "440GP Rev. B",
872 .cpu_features = CPU_FTRS_44X, 942 .cpu_features = CPU_FTRS_44X,
873 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 943 .cpu_user_features = COMMON_USER_BOOKE,
874 .icache_bsize = 32, 944 .icache_bsize = 32,
875 .dcache_bsize = 32, 945 .dcache_bsize = 32,
946 .platform = "ppc440gp",
876 }, 947 },
877 { /* 440GP Rev. C */ 948 { /* 440GP Rev. C */
878 .pvr_mask = 0xf0000fff, 949 .pvr_mask = 0xf0000fff,
879 .pvr_value = 0x40000481, 950 .pvr_value = 0x40000481,
880 .cpu_name = "440GP Rev. C", 951 .cpu_name = "440GP Rev. C",
881 .cpu_features = CPU_FTRS_44X, 952 .cpu_features = CPU_FTRS_44X,
882 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 953 .cpu_user_features = COMMON_USER_BOOKE,
883 .icache_bsize = 32, 954 .icache_bsize = 32,
884 .dcache_bsize = 32, 955 .dcache_bsize = 32,
956 .platform = "ppc440gp",
885 }, 957 },
886 { /* 440GX Rev. A */ 958 { /* 440GX Rev. A */
887 .pvr_mask = 0xf0000fff, 959 .pvr_mask = 0xf0000fff,
888 .pvr_value = 0x50000850, 960 .pvr_value = 0x50000850,
889 .cpu_name = "440GX Rev. A", 961 .cpu_name = "440GX Rev. A",
890 .cpu_features = CPU_FTRS_44X, 962 .cpu_features = CPU_FTRS_44X,
891 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 963 .cpu_user_features = COMMON_USER_BOOKE,
892 .icache_bsize = 32, 964 .icache_bsize = 32,
893 .dcache_bsize = 32, 965 .dcache_bsize = 32,
966 .platform = "ppc440",
894 }, 967 },
895 { /* 440GX Rev. B */ 968 { /* 440GX Rev. B */
896 .pvr_mask = 0xf0000fff, 969 .pvr_mask = 0xf0000fff,
897 .pvr_value = 0x50000851, 970 .pvr_value = 0x50000851,
898 .cpu_name = "440GX Rev. B", 971 .cpu_name = "440GX Rev. B",
899 .cpu_features = CPU_FTRS_44X, 972 .cpu_features = CPU_FTRS_44X,
900 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 973 .cpu_user_features = COMMON_USER_BOOKE,
901 .icache_bsize = 32, 974 .icache_bsize = 32,
902 .dcache_bsize = 32, 975 .dcache_bsize = 32,
976 .platform = "ppc440",
903 }, 977 },
904 { /* 440GX Rev. C */ 978 { /* 440GX Rev. C */
905 .pvr_mask = 0xf0000fff, 979 .pvr_mask = 0xf0000fff,
906 .pvr_value = 0x50000892, 980 .pvr_value = 0x50000892,
907 .cpu_name = "440GX Rev. C", 981 .cpu_name = "440GX Rev. C",
908 .cpu_features = CPU_FTRS_44X, 982 .cpu_features = CPU_FTRS_44X,
909 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 983 .cpu_user_features = COMMON_USER_BOOKE,
910 .icache_bsize = 32, 984 .icache_bsize = 32,
911 .dcache_bsize = 32, 985 .dcache_bsize = 32,
986 .platform = "ppc440",
912 }, 987 },
913 { /* 440GX Rev. F */ 988 { /* 440GX Rev. F */
914 .pvr_mask = 0xf0000fff, 989 .pvr_mask = 0xf0000fff,
915 .pvr_value = 0x50000894, 990 .pvr_value = 0x50000894,
916 .cpu_name = "440GX Rev. F", 991 .cpu_name = "440GX Rev. F",
917 .cpu_features = CPU_FTRS_44X, 992 .cpu_features = CPU_FTRS_44X,
918 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 993 .cpu_user_features = COMMON_USER_BOOKE,
919 .icache_bsize = 32, 994 .icache_bsize = 32,
920 .dcache_bsize = 32, 995 .dcache_bsize = 32,
996 .platform = "ppc440",
921 }, 997 },
922 { /* 440SP Rev. A */ 998 { /* 440SP Rev. A */
923 .pvr_mask = 0xff000fff, 999 .pvr_mask = 0xff000fff,
924 .pvr_value = 0x53000891, 1000 .pvr_value = 0x53000891,
925 .cpu_name = "440SP Rev. A", 1001 .cpu_name = "440SP Rev. A",
926 .cpu_features = CPU_FTRS_44X, 1002 .cpu_features = CPU_FTRS_44X,
927 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 1003 .cpu_user_features = COMMON_USER_BOOKE,
928 .icache_bsize = 32, 1004 .icache_bsize = 32,
929 .dcache_bsize = 32, 1005 .dcache_bsize = 32,
1006 .platform = "ppc440",
930 }, 1007 },
931 { /* 440SPe Rev. A */ 1008 { /* 440SPe Rev. A */
932 .pvr_mask = 0xff000fff, 1009 .pvr_mask = 0xff000fff,
@@ -934,9 +1011,10 @@ struct cpu_spec cpu_specs[] = {
934 .cpu_name = "440SPe Rev. A", 1011 .cpu_name = "440SPe Rev. A",
935 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 1012 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
936 CPU_FTR_USE_TB, 1013 CPU_FTR_USE_TB,
937 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 1014 .cpu_user_features = COMMON_USER_BOOKE,
938 .icache_bsize = 32, 1015 .icache_bsize = 32,
939 .dcache_bsize = 32, 1016 .dcache_bsize = 32,
1017 .platform = "ppc440",
940 }, 1018 },
941#endif /* CONFIG_44x */ 1019#endif /* CONFIG_44x */
942#ifdef CONFIG_FSL_BOOKE 1020#ifdef CONFIG_FSL_BOOKE
@@ -946,10 +1024,11 @@ struct cpu_spec cpu_specs[] = {
946 .cpu_name = "e200z5", 1024 .cpu_name = "e200z5",
947 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 1025 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
948 .cpu_features = CPU_FTRS_E200, 1026 .cpu_features = CPU_FTRS_E200,
949 .cpu_user_features = PPC_FEATURE_32 | 1027 .cpu_user_features = COMMON_USER_BOOKE |
950 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_EFP_SINGLE | 1028 PPC_FEATURE_HAS_EFP_SINGLE |
951 PPC_FEATURE_UNIFIED_CACHE, 1029 PPC_FEATURE_UNIFIED_CACHE,
952 .dcache_bsize = 32, 1030 .dcache_bsize = 32,
1031 .platform = "ppc5554",
953 }, 1032 },
954 { /* e200z6 */ 1033 { /* e200z6 */
955 .pvr_mask = 0xfff00000, 1034 .pvr_mask = 0xfff00000,
@@ -957,11 +1036,12 @@ struct cpu_spec cpu_specs[] = {
957 .cpu_name = "e200z6", 1036 .cpu_name = "e200z6",
958 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 1037 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
959 .cpu_features = CPU_FTRS_E200, 1038 .cpu_features = CPU_FTRS_E200,
960 .cpu_user_features = PPC_FEATURE_32 | 1039 .cpu_user_features = COMMON_USER_BOOKE |
961 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP | 1040 PPC_FEATURE_SPE_COMP |
962 PPC_FEATURE_HAS_EFP_SINGLE | 1041 PPC_FEATURE_HAS_EFP_SINGLE |
963 PPC_FEATURE_UNIFIED_CACHE, 1042 PPC_FEATURE_UNIFIED_CACHE,
964 .dcache_bsize = 32, 1043 .dcache_bsize = 32,
1044 .platform = "ppc5554",
965 }, 1045 },
966 { /* e500 */ 1046 { /* e500 */
967 .pvr_mask = 0xffff0000, 1047 .pvr_mask = 0xffff0000,
@@ -969,14 +1049,15 @@ struct cpu_spec cpu_specs[] = {
969 .cpu_name = "e500", 1049 .cpu_name = "e500",
970 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 1050 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
971 .cpu_features = CPU_FTRS_E500, 1051 .cpu_features = CPU_FTRS_E500,
972 .cpu_user_features = PPC_FEATURE_32 | 1052 .cpu_user_features = COMMON_USER_BOOKE |
973 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP | 1053 PPC_FEATURE_SPE_COMP |
974 PPC_FEATURE_HAS_EFP_SINGLE, 1054 PPC_FEATURE_HAS_EFP_SINGLE,
975 .icache_bsize = 32, 1055 .icache_bsize = 32,
976 .dcache_bsize = 32, 1056 .dcache_bsize = 32,
977 .num_pmcs = 4, 1057 .num_pmcs = 4,
978 .oprofile_cpu_type = "ppc/e500", 1058 .oprofile_cpu_type = "ppc/e500",
979 .oprofile_type = BOOKE, 1059 .oprofile_type = PPC_OPROFILE_BOOKE,
1060 .platform = "ppc8540",
980 }, 1061 },
981 { /* e500v2 */ 1062 { /* e500v2 */
982 .pvr_mask = 0xffff0000, 1063 .pvr_mask = 0xffff0000,
@@ -984,14 +1065,16 @@ struct cpu_spec cpu_specs[] = {
984 .cpu_name = "e500v2", 1065 .cpu_name = "e500v2",
985 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 1066 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
986 .cpu_features = CPU_FTRS_E500_2, 1067 .cpu_features = CPU_FTRS_E500_2,
987 .cpu_user_features = PPC_FEATURE_32 | 1068 .cpu_user_features = COMMON_USER_BOOKE |
988 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP | 1069 PPC_FEATURE_SPE_COMP |
989 PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_HAS_EFP_DOUBLE, 1070 PPC_FEATURE_HAS_EFP_SINGLE |
1071 PPC_FEATURE_HAS_EFP_DOUBLE,
990 .icache_bsize = 32, 1072 .icache_bsize = 32,
991 .dcache_bsize = 32, 1073 .dcache_bsize = 32,
992 .num_pmcs = 4, 1074 .num_pmcs = 4,
993 .oprofile_cpu_type = "ppc/e500", 1075 .oprofile_cpu_type = "ppc/e500",
994 .oprofile_type = BOOKE, 1076 .oprofile_type = PPC_OPROFILE_BOOKE,
1077 .platform = "ppc8548",
995 }, 1078 },
996#endif 1079#endif
997#if !CLASSIC_PPC 1080#if !CLASSIC_PPC
@@ -1003,6 +1086,7 @@ struct cpu_spec cpu_specs[] = {
1003 .cpu_user_features = PPC_FEATURE_32, 1086 .cpu_user_features = PPC_FEATURE_32,
1004 .icache_bsize = 32, 1087 .icache_bsize = 32,
1005 .dcache_bsize = 32, 1088 .dcache_bsize = 32,
1089 .platform = "powerpc",
1006 } 1090 }
1007#endif /* !CLASSIC_PPC */ 1091#endif /* !CLASSIC_PPC */
1008#endif /* CONFIG_PPC32 */ 1092#endif /* CONFIG_PPC32 */
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 5f248e3fdf82..8c21d378f5d2 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -84,7 +84,10 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
84 * squirrelled away. ELF notes happen to provide 84 * squirrelled away. ELF notes happen to provide
85 * all of that that no need to invent something new. 85 * all of that that no need to invent something new.
86 */ 86 */
87 buf = &crash_notes[cpu][0]; 87 buf = (u32*)per_cpu_ptr(crash_notes, cpu);
88 if (!buf)
89 return;
90
88 memset(&prstatus, 0, sizeof(prstatus)); 91 memset(&prstatus, 0, sizeof(prstatus));
89 prstatus.pr_pid = current->pid; 92 prstatus.pr_pid = current->pid;
90 elf_core_copy_regs(&prstatus.pr_reg, regs); 93 elf_core_copy_regs(&prstatus.pr_reg, regs);
@@ -93,76 +96,6 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
93 final_note(buf); 96 final_note(buf);
94} 97}
95 98
96/* FIXME Merge this with xmon_save_regs ?? */
97static inline void crash_get_current_regs(struct pt_regs *regs)
98{
99 unsigned long tmp1, tmp2;
100
101 __asm__ __volatile__ (
102 "std 0,0(%2)\n"
103 "std 1,8(%2)\n"
104 "std 2,16(%2)\n"
105 "std 3,24(%2)\n"
106 "std 4,32(%2)\n"
107 "std 5,40(%2)\n"
108 "std 6,48(%2)\n"
109 "std 7,56(%2)\n"
110 "std 8,64(%2)\n"
111 "std 9,72(%2)\n"
112 "std 10,80(%2)\n"
113 "std 11,88(%2)\n"
114 "std 12,96(%2)\n"
115 "std 13,104(%2)\n"
116 "std 14,112(%2)\n"
117 "std 15,120(%2)\n"
118 "std 16,128(%2)\n"
119 "std 17,136(%2)\n"
120 "std 18,144(%2)\n"
121 "std 19,152(%2)\n"
122 "std 20,160(%2)\n"
123 "std 21,168(%2)\n"
124 "std 22,176(%2)\n"
125 "std 23,184(%2)\n"
126 "std 24,192(%2)\n"
127 "std 25,200(%2)\n"
128 "std 26,208(%2)\n"
129 "std 27,216(%2)\n"
130 "std 28,224(%2)\n"
131 "std 29,232(%2)\n"
132 "std 30,240(%2)\n"
133 "std 31,248(%2)\n"
134 "mfmsr %0\n"
135 "std %0, 264(%2)\n"
136 "mfctr %0\n"
137 "std %0, 280(%2)\n"
138 "mflr %0\n"
139 "std %0, 288(%2)\n"
140 "bl 1f\n"
141 "1: mflr %1\n"
142 "std %1, 256(%2)\n"
143 "mtlr %0\n"
144 "mfxer %0\n"
145 "std %0, 296(%2)\n"
146 : "=&r" (tmp1), "=&r" (tmp2)
147 : "b" (regs));
148}
149
150/* We may have saved_regs from where the error came from
151 * or it is NULL if via a direct panic().
152 */
153static void crash_save_self(struct pt_regs *saved_regs)
154{
155 struct pt_regs regs;
156 int cpu;
157
158 cpu = smp_processor_id();
159 if (saved_regs)
160 memcpy(&regs, saved_regs, sizeof(regs));
161 else
162 crash_get_current_regs(&regs);
163 crash_save_this_cpu(&regs, cpu);
164}
165
166#ifdef CONFIG_SMP 99#ifdef CONFIG_SMP
167static atomic_t waiting_for_crash_ipi; 100static atomic_t waiting_for_crash_ipi;
168 101
@@ -260,5 +193,5 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
260 */ 193 */
261 crashing_cpu = smp_processor_id(); 194 crashing_cpu = smp_processor_id();
262 crash_kexec_prepare_cpus(); 195 crash_kexec_prepare_cpus();
263 crash_save_self(regs); 196 crash_save_this_cpu(regs, crashing_cpu);
264} 197}
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 036b71d2adfc..d8da2a35c0a4 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -988,7 +988,7 @@ _GLOBAL(enter_rtas)
988 stwu r1,-INT_FRAME_SIZE(r1) 988 stwu r1,-INT_FRAME_SIZE(r1)
989 mflr r0 989 mflr r0
990 stw r0,INT_FRAME_SIZE+4(r1) 990 stw r0,INT_FRAME_SIZE+4(r1)
991 LOADADDR(r4, rtas) 991 LOAD_REG_ADDR(r4, rtas)
992 lis r6,1f@ha /* physical return address for rtas */ 992 lis r6,1f@ha /* physical return address for rtas */
993 addi r6,r6,1f@l 993 addi r6,r6,1f@l
994 tophys(r6,r6) 994 tophys(r6,r6)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index aacebb33e98a..542036318866 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -511,7 +511,8 @@ restore:
511 cmpdi 0,r5,0 511 cmpdi 0,r5,0
512 beq 4f 512 beq 4f
513 /* Check for pending interrupts (iSeries) */ 513 /* Check for pending interrupts (iSeries) */
514 ld r3,PACALPPACA+LPPACAANYINT(r13) 514 ld r3,PACALPPACAPTR(r13)
515 ld r3,LPPACAANYINT(r3)
515 cmpdi r3,0 516 cmpdi r3,0
516 beq+ 4f /* skip do_IRQ if no interrupts */ 517 beq+ 4f /* skip do_IRQ if no interrupts */
517 518
@@ -689,9 +690,8 @@ _GLOBAL(enter_rtas)
689 std r6,PACASAVEDMSR(r13) 690 std r6,PACASAVEDMSR(r13)
690 691
691 /* Setup our real return addr */ 692 /* Setup our real return addr */
692 SET_REG_TO_LABEL(r4,.rtas_return_loc) 693 LOAD_REG_ADDR(r4,.rtas_return_loc)
693 SET_REG_TO_CONST(r9,PAGE_OFFSET) 694 clrldi r4,r4,2 /* convert to realmode address */
694 sub r4,r4,r9
695 mtlr r4 695 mtlr r4
696 696
697 li r0,0 697 li r0,0
@@ -706,7 +706,7 @@ _GLOBAL(enter_rtas)
706 sync /* disable interrupts so SRR0/1 */ 706 sync /* disable interrupts so SRR0/1 */
707 mtmsrd r0 /* don't get trashed */ 707 mtmsrd r0 /* don't get trashed */
708 708
709 SET_REG_TO_LABEL(r4,rtas) 709 LOAD_REG_ADDR(r4, rtas)
710 ld r5,RTASENTRY(r4) /* get the rtas->entry value */ 710 ld r5,RTASENTRY(r4) /* get the rtas->entry value */
711 ld r4,RTASBASE(r4) /* get the rtas->base value */ 711 ld r4,RTASBASE(r4) /* get the rtas->base value */
712 712
@@ -718,8 +718,7 @@ _GLOBAL(enter_rtas)
718_STATIC(rtas_return_loc) 718_STATIC(rtas_return_loc)
719 /* relocation is off at this point */ 719 /* relocation is off at this point */
720 mfspr r4,SPRN_SPRG3 /* Get PACA */ 720 mfspr r4,SPRN_SPRG3 /* Get PACA */
721 SET_REG_TO_CONST(r5, PAGE_OFFSET) 721 clrldi r4,r4,2 /* convert to realmode address */
722 sub r4,r4,r5 /* RELOC the PACA base pointer */
723 722
724 mfmsr r6 723 mfmsr r6
725 li r0,MSR_RI 724 li r0,MSR_RI
@@ -728,7 +727,7 @@ _STATIC(rtas_return_loc)
728 mtmsrd r6 727 mtmsrd r6
729 728
730 ld r1,PACAR1(r4) /* Restore our SP */ 729 ld r1,PACAR1(r4) /* Restore our SP */
731 LOADADDR(r3,.rtas_restore_regs) 730 LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
732 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */ 731 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
733 732
734 mtspr SPRN_SRR0,r3 733 mtspr SPRN_SRR0,r3
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index b780b42c95fc..e4362dfa37fb 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -39,9 +39,9 @@ _GLOBAL(load_up_fpu)
39 * to another. Instead we call giveup_fpu in switch_to. 39 * to another. Instead we call giveup_fpu in switch_to.
40 */ 40 */
41#ifndef CONFIG_SMP 41#ifndef CONFIG_SMP
42 LOADBASE(r3, last_task_used_math) 42 LOAD_REG_ADDRBASE(r3, last_task_used_math)
43 toreal(r3) 43 toreal(r3)
44 PPC_LL r4,OFF(last_task_used_math)(r3) 44 PPC_LL r4,ADDROFF(last_task_used_math)(r3)
45 PPC_LCMPI 0,r4,0 45 PPC_LCMPI 0,r4,0
46 beq 1f 46 beq 1f
47 toreal(r4) 47 toreal(r4)
@@ -77,7 +77,7 @@ _GLOBAL(load_up_fpu)
77#ifndef CONFIG_SMP 77#ifndef CONFIG_SMP
78 subi r4,r5,THREAD 78 subi r4,r5,THREAD
79 fromreal(r4) 79 fromreal(r4)
80 PPC_STL r4,OFF(last_task_used_math)(r3) 80 PPC_STL r4,ADDROFF(last_task_used_math)(r3)
81#endif /* CONFIG_SMP */ 81#endif /* CONFIG_SMP */
82 /* restore registers and return */ 82 /* restore registers and return */
83 /* we haven't used ctr or xer or lr */ 83 /* we haven't used ctr or xer or lr */
@@ -113,8 +113,8 @@ _GLOBAL(giveup_fpu)
1131: 1131:
114#ifndef CONFIG_SMP 114#ifndef CONFIG_SMP
115 li r5,0 115 li r5,0
116 LOADBASE(r4,last_task_used_math) 116 LOAD_REG_ADDRBASE(r4,last_task_used_math)
117 PPC_STL r5,OFF(last_task_used_math)(r4) 117 PPC_STL r5,ADDROFF(last_task_used_math)(r4)
118#endif /* CONFIG_SMP */ 118#endif /* CONFIG_SMP */
119 blr 119 blr
120 120
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 1c066d125375..308268466342 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -154,12 +154,12 @@ _GLOBAL(__secondary_hold)
154 bne 100b 154 bne 100b
155 155
156#ifdef CONFIG_HMT 156#ifdef CONFIG_HMT
157 LOADADDR(r4, .hmt_init) 157 SET_REG_IMMEDIATE(r4, .hmt_init)
158 mtctr r4 158 mtctr r4
159 bctr 159 bctr
160#else 160#else
161#ifdef CONFIG_SMP 161#ifdef CONFIG_SMP
162 LOADADDR(r4, .pSeries_secondary_smp_init) 162 LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init)
163 mtctr r4 163 mtctr r4
164 mr r3,r24 164 mr r3,r24
165 bctr 165 bctr
@@ -205,9 +205,10 @@ exception_marker:
205#define EX_LR 72 205#define EX_LR 72
206 206
207/* 207/*
208 * We're short on space and time in the exception prolog, so we can't use 208 * We're short on space and time in the exception prolog, so we can't
209 * the normal LOADADDR macro. Normally we just need the low halfword of the 209 * use the normal SET_REG_IMMEDIATE macro. Normally we just need the
210 * address, but for Kdump we need the whole low word. 210 * low halfword of the address, but for Kdump we need the whole low
211 * word.
211 */ 212 */
212#ifdef CONFIG_CRASH_DUMP 213#ifdef CONFIG_CRASH_DUMP
213#define LOAD_HANDLER(reg, label) \ 214#define LOAD_HANDLER(reg, label) \
@@ -254,8 +255,9 @@ exception_marker:
254 255
255#define EXCEPTION_PROLOG_ISERIES_2 \ 256#define EXCEPTION_PROLOG_ISERIES_2 \
256 mfmsr r10; \ 257 mfmsr r10; \
257 ld r11,PACALPPACA+LPPACASRR0(r13); \ 258 ld r12,PACALPPACAPTR(r13); \
258 ld r12,PACALPPACA+LPPACASRR1(r13); \ 259 ld r11,LPPACASRR0(r12); \
260 ld r12,LPPACASRR1(r12); \
259 ori r10,r10,MSR_RI; \ 261 ori r10,r10,MSR_RI; \
260 mtmsrd r10,1 262 mtmsrd r10,1
261 263
@@ -634,7 +636,8 @@ data_access_slb_iSeries:
634 std r12,PACA_EXSLB+EX_R12(r13) 636 std r12,PACA_EXSLB+EX_R12(r13)
635 mfspr r10,SPRN_SPRG1 637 mfspr r10,SPRN_SPRG1
636 std r10,PACA_EXSLB+EX_R13(r13) 638 std r10,PACA_EXSLB+EX_R13(r13)
637 ld r12,PACALPPACA+LPPACASRR1(r13); 639 ld r12,PACALPPACAPTR(r13)
640 ld r12,LPPACASRR1(r12)
638 b .slb_miss_realmode 641 b .slb_miss_realmode
639 642
640 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN) 643 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
@@ -644,7 +647,8 @@ instruction_access_slb_iSeries:
644 mtspr SPRN_SPRG1,r13 /* save r13 */ 647 mtspr SPRN_SPRG1,r13 /* save r13 */
645 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ 648 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
646 std r3,PACA_EXSLB+EX_R3(r13) 649 std r3,PACA_EXSLB+EX_R3(r13)
647 ld r3,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */ 650 ld r3,PACALPPACAPTR(r13)
651 ld r3,LPPACASRR0(r3) /* get SRR0 value */
648 std r9,PACA_EXSLB+EX_R9(r13) 652 std r9,PACA_EXSLB+EX_R9(r13)
649 mfcr r9 653 mfcr r9
650#ifdef __DISABLED__ 654#ifdef __DISABLED__
@@ -656,7 +660,8 @@ instruction_access_slb_iSeries:
656 std r12,PACA_EXSLB+EX_R12(r13) 660 std r12,PACA_EXSLB+EX_R12(r13)
657 mfspr r10,SPRN_SPRG1 661 mfspr r10,SPRN_SPRG1
658 std r10,PACA_EXSLB+EX_R13(r13) 662 std r10,PACA_EXSLB+EX_R13(r13)
659 ld r12,PACALPPACA+LPPACASRR1(r13); 663 ld r12,PACALPPACAPTR(r13)
664 ld r12,LPPACASRR1(r12)
660 b .slb_miss_realmode 665 b .slb_miss_realmode
661 666
662#ifdef __DISABLED__ 667#ifdef __DISABLED__
@@ -713,7 +718,7 @@ system_reset_iSeries:
713 lbz r23,PACAPROCSTART(r13) /* Test if this processor 718 lbz r23,PACAPROCSTART(r13) /* Test if this processor
714 * should start */ 719 * should start */
715 sync 720 sync
716 LOADADDR(r3,current_set) 721 LOAD_REG_IMMEDIATE(r3,current_set)
717 sldi r28,r24,3 /* get current_set[cpu#] */ 722 sldi r28,r24,3 /* get current_set[cpu#] */
718 ldx r3,r3,r28 723 ldx r3,r3,r28
719 addi r1,r3,THREAD_SIZE 724 addi r1,r3,THREAD_SIZE
@@ -745,17 +750,19 @@ iSeries_secondary_smp_loop:
745 .globl decrementer_iSeries_masked 750 .globl decrementer_iSeries_masked
746decrementer_iSeries_masked: 751decrementer_iSeries_masked:
747 li r11,1 752 li r11,1
748 stb r11,PACALPPACA+LPPACADECRINT(r13) 753 ld r12,PACALPPACAPTR(r13)
749 LOADBASE(r12,tb_ticks_per_jiffy) 754 stb r11,LPPACADECRINT(r12)
750 lwz r12,OFF(tb_ticks_per_jiffy)(r12) 755 LOAD_REG_ADDRBASE(r12,tb_ticks_per_jiffy)
756 lwz r12,ADDROFF(tb_ticks_per_jiffy)(r12)
751 mtspr SPRN_DEC,r12 757 mtspr SPRN_DEC,r12
752 /* fall through */ 758 /* fall through */
753 759
754 .globl hardware_interrupt_iSeries_masked 760 .globl hardware_interrupt_iSeries_masked
755hardware_interrupt_iSeries_masked: 761hardware_interrupt_iSeries_masked:
756 mtcrf 0x80,r9 /* Restore regs */ 762 mtcrf 0x80,r9 /* Restore regs */
757 ld r11,PACALPPACA+LPPACASRR0(r13) 763 ld r12,PACALPPACAPTR(r13)
758 ld r12,PACALPPACA+LPPACASRR1(r13) 764 ld r11,LPPACASRR0(r12)
765 ld r12,LPPACASRR1(r12)
759 mtspr SPRN_SRR0,r11 766 mtspr SPRN_SRR0,r11
760 mtspr SPRN_SRR1,r12 767 mtspr SPRN_SRR1,r12
761 ld r9,PACA_EXGEN+EX_R9(r13) 768 ld r9,PACA_EXGEN+EX_R9(r13)
@@ -994,7 +1001,8 @@ _GLOBAL(slb_miss_realmode)
994 ld r3,PACA_EXSLB+EX_R3(r13) 1001 ld r3,PACA_EXSLB+EX_R3(r13)
995 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ 1002 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
996#ifdef CONFIG_PPC_ISERIES 1003#ifdef CONFIG_PPC_ISERIES
997 ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */ 1004 ld r11,PACALPPACAPTR(r13)
1005 ld r11,LPPACASRR0(r11) /* get SRR0 value */
998#endif /* CONFIG_PPC_ISERIES */ 1006#endif /* CONFIG_PPC_ISERIES */
999 1007
1000 mtlr r10 1008 mtlr r10
@@ -1412,7 +1420,7 @@ _GLOBAL(pSeries_secondary_smp_init)
1412 * physical cpu id in r24, we need to search the pacas to find 1420 * physical cpu id in r24, we need to search the pacas to find
1413 * which logical id maps to our physical one. 1421 * which logical id maps to our physical one.
1414 */ 1422 */
1415 LOADADDR(r13, paca) /* Get base vaddr of paca array */ 1423 LOAD_REG_IMMEDIATE(r13, paca) /* Get base vaddr of paca array */
1416 li r5,0 /* logical cpu id */ 1424 li r5,0 /* logical cpu id */
14171: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ 14251: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
1418 cmpw r6,r24 /* Compare to our id */ 1426 cmpw r6,r24 /* Compare to our id */
@@ -1446,8 +1454,8 @@ _GLOBAL(pSeries_secondary_smp_init)
1446#ifdef CONFIG_PPC_ISERIES 1454#ifdef CONFIG_PPC_ISERIES
1447_STATIC(__start_initialization_iSeries) 1455_STATIC(__start_initialization_iSeries)
1448 /* Clear out the BSS */ 1456 /* Clear out the BSS */
1449 LOADADDR(r11,__bss_stop) 1457 LOAD_REG_IMMEDIATE(r11,__bss_stop)
1450 LOADADDR(r8,__bss_start) 1458 LOAD_REG_IMMEDIATE(r8,__bss_start)
1451 sub r11,r11,r8 /* bss size */ 1459 sub r11,r11,r8 /* bss size */
1452 addi r11,r11,7 /* round up to an even double word */ 1460 addi r11,r11,7 /* round up to an even double word */
1453 rldicl. r11,r11,61,3 /* shift right by 3 */ 1461 rldicl. r11,r11,61,3 /* shift right by 3 */
@@ -1458,17 +1466,17 @@ _STATIC(__start_initialization_iSeries)
14583: stdu r0,8(r8) 14663: stdu r0,8(r8)
1459 bdnz 3b 1467 bdnz 3b
14604: 14684:
1461 LOADADDR(r1,init_thread_union) 1469 LOAD_REG_IMMEDIATE(r1,init_thread_union)
1462 addi r1,r1,THREAD_SIZE 1470 addi r1,r1,THREAD_SIZE
1463 li r0,0 1471 li r0,0
1464 stdu r0,-STACK_FRAME_OVERHEAD(r1) 1472 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1465 1473
1466 LOADADDR(r3,cpu_specs) 1474 LOAD_REG_IMMEDIATE(r3,cpu_specs)
1467 LOADADDR(r4,cur_cpu_spec) 1475 LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
1468 li r5,0 1476 li r5,0
1469 bl .identify_cpu 1477 bl .identify_cpu
1470 1478
1471 LOADADDR(r2,__toc_start) 1479 LOAD_REG_IMMEDIATE(r2,__toc_start)
1472 addi r2,r2,0x4000 1480 addi r2,r2,0x4000
1473 addi r2,r2,0x4000 1481 addi r2,r2,0x4000
1474 1482
@@ -1528,7 +1536,7 @@ _GLOBAL(__start_initialization_multiplatform)
1528 li r24,0 1536 li r24,0
1529 1537
1530 /* Switch off MMU if not already */ 1538 /* Switch off MMU if not already */
1531 LOADADDR(r4, .__after_prom_start - KERNELBASE) 1539 LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE)
1532 add r4,r4,r30 1540 add r4,r4,r30
1533 bl .__mmu_off 1541 bl .__mmu_off
1534 b .__after_prom_start 1542 b .__after_prom_start
@@ -1548,7 +1556,7 @@ _STATIC(__boot_from_prom)
1548 /* put a relocation offset into r3 */ 1556 /* put a relocation offset into r3 */
1549 bl .reloc_offset 1557 bl .reloc_offset
1550 1558
1551 LOADADDR(r2,__toc_start) 1559 LOAD_REG_IMMEDIATE(r2,__toc_start)
1552 addi r2,r2,0x4000 1560 addi r2,r2,0x4000
1553 addi r2,r2,0x4000 1561 addi r2,r2,0x4000
1554 1562
@@ -1588,9 +1596,9 @@ _STATIC(__after_prom_start)
1588 */ 1596 */
1589 bl .reloc_offset 1597 bl .reloc_offset
1590 mr r26,r3 1598 mr r26,r3
1591 SET_REG_TO_CONST(r27,KERNELBASE) 1599 LOAD_REG_IMMEDIATE(r27, KERNELBASE)
1592 1600
1593 LOADADDR(r3, PHYSICAL_START) /* target addr */ 1601 LOAD_REG_IMMEDIATE(r3, PHYSICAL_START) /* target addr */
1594 1602
1595 // XXX FIXME: Use phys returned by OF (r30) 1603 // XXX FIXME: Use phys returned by OF (r30)
1596 add r4,r27,r26 /* source addr */ 1604 add r4,r27,r26 /* source addr */
@@ -1598,7 +1606,7 @@ _STATIC(__after_prom_start)
1598 /* i.e. where we are running */ 1606 /* i.e. where we are running */
1599 /* the source addr */ 1607 /* the source addr */
1600 1608
1601 LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */ 1609 LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */
1602 sub r5,r5,r27 1610 sub r5,r5,r27
1603 1611
1604 li r6,0x100 /* Start offset, the first 0x100 */ 1612 li r6,0x100 /* Start offset, the first 0x100 */
@@ -1608,11 +1616,11 @@ _STATIC(__after_prom_start)
1608 /* this includes the code being */ 1616 /* this includes the code being */
1609 /* executed here. */ 1617 /* executed here. */
1610 1618
1611 LOADADDR(r0, 4f) /* Jump to the copy of this code */ 1619 LOAD_REG_IMMEDIATE(r0, 4f) /* Jump to the copy of this code */
1612 mtctr r0 /* that we just made/relocated */ 1620 mtctr r0 /* that we just made/relocated */
1613 bctr 1621 bctr
1614 1622
16154: LOADADDR(r5,klimit) 16234: LOAD_REG_IMMEDIATE(r5,klimit)
1616 add r5,r5,r26 1624 add r5,r5,r26
1617 ld r5,0(r5) /* get the value of klimit */ 1625 ld r5,0(r5) /* get the value of klimit */
1618 sub r5,r5,r27 1626 sub r5,r5,r27
@@ -1694,7 +1702,7 @@ _GLOBAL(pmac_secondary_start)
1694 mtmsrd r3 /* RI on */ 1702 mtmsrd r3 /* RI on */
1695 1703
1696 /* Set up a paca value for this processor. */ 1704 /* Set up a paca value for this processor. */
1697 LOADADDR(r4, paca) /* Get base vaddr of paca array */ 1705 LOAD_REG_IMMEDIATE(r4, paca) /* Get base vaddr of paca array */
1698 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ 1706 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
1699 add r13,r13,r4 /* for this processor. */ 1707 add r13,r13,r4 /* for this processor. */
1700 mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */ 1708 mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
@@ -1731,7 +1739,7 @@ _GLOBAL(__secondary_start)
1731 bl .early_setup_secondary 1739 bl .early_setup_secondary
1732 1740
1733 /* Initialize the kernel stack. Just a repeat for iSeries. */ 1741 /* Initialize the kernel stack. Just a repeat for iSeries. */
1734 LOADADDR(r3,current_set) 1742 LOAD_REG_ADDR(r3, current_set)
1735 sldi r28,r24,3 /* get current_set[cpu#] */ 1743 sldi r28,r24,3 /* get current_set[cpu#] */
1736 ldx r1,r3,r28 1744 ldx r1,r3,r28
1737 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD 1745 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
@@ -1742,8 +1750,8 @@ _GLOBAL(__secondary_start)
1742 mtlr r7 1750 mtlr r7
1743 1751
1744 /* enable MMU and jump to start_secondary */ 1752 /* enable MMU and jump to start_secondary */
1745 LOADADDR(r3,.start_secondary_prolog) 1753 LOAD_REG_ADDR(r3, .start_secondary_prolog)
1746 SET_REG_TO_CONST(r4, MSR_KERNEL) 1754 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
1747#ifdef DO_SOFT_DISABLE 1755#ifdef DO_SOFT_DISABLE
1748 ori r4,r4,MSR_EE 1756 ori r4,r4,MSR_EE
1749#endif 1757#endif
@@ -1792,8 +1800,8 @@ _STATIC(start_here_multiplatform)
1792 * be detached from the kernel completely. Besides, we need 1800 * be detached from the kernel completely. Besides, we need
1793 * to clear it now for kexec-style entry. 1801 * to clear it now for kexec-style entry.
1794 */ 1802 */
1795 LOADADDR(r11,__bss_stop) 1803 LOAD_REG_IMMEDIATE(r11,__bss_stop)
1796 LOADADDR(r8,__bss_start) 1804 LOAD_REG_IMMEDIATE(r8,__bss_start)
1797 sub r11,r11,r8 /* bss size */ 1805 sub r11,r11,r8 /* bss size */
1798 addi r11,r11,7 /* round up to an even double word */ 1806 addi r11,r11,7 /* round up to an even double word */
1799 rldicl. r11,r11,61,3 /* shift right by 3 */ 1807 rldicl. r11,r11,61,3 /* shift right by 3 */
@@ -1831,7 +1839,7 @@ _STATIC(start_here_multiplatform)
1831 /* up the htab. This is done because we have relocated the */ 1839 /* up the htab. This is done because we have relocated the */
1832 /* kernel but are still running in real mode. */ 1840 /* kernel but are still running in real mode. */
1833 1841
1834 LOADADDR(r3,init_thread_union) 1842 LOAD_REG_IMMEDIATE(r3,init_thread_union)
1835 add r3,r3,r26 1843 add r3,r3,r26
1836 1844
1837 /* set up a stack pointer (physical address) */ 1845 /* set up a stack pointer (physical address) */
@@ -1840,14 +1848,14 @@ _STATIC(start_here_multiplatform)
1840 stdu r0,-STACK_FRAME_OVERHEAD(r1) 1848 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1841 1849
1842 /* set up the TOC (physical address) */ 1850 /* set up the TOC (physical address) */
1843 LOADADDR(r2,__toc_start) 1851 LOAD_REG_IMMEDIATE(r2,__toc_start)
1844 addi r2,r2,0x4000 1852 addi r2,r2,0x4000
1845 addi r2,r2,0x4000 1853 addi r2,r2,0x4000
1846 add r2,r2,r26 1854 add r2,r2,r26
1847 1855
1848 LOADADDR(r3,cpu_specs) 1856 LOAD_REG_IMMEDIATE(r3, cpu_specs)
1849 add r3,r3,r26 1857 add r3,r3,r26
1850 LOADADDR(r4,cur_cpu_spec) 1858 LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
1851 add r4,r4,r26 1859 add r4,r4,r26
1852 mr r5,r26 1860 mr r5,r26
1853 bl .identify_cpu 1861 bl .identify_cpu
@@ -1863,11 +1871,11 @@ _STATIC(start_here_multiplatform)
1863 * nowhere it can be initialized differently before we reach this 1871 * nowhere it can be initialized differently before we reach this
1864 * code 1872 * code
1865 */ 1873 */
1866 LOADADDR(r27, boot_cpuid) 1874 LOAD_REG_IMMEDIATE(r27, boot_cpuid)
1867 add r27,r27,r26 1875 add r27,r27,r26
1868 lwz r27,0(r27) 1876 lwz r27,0(r27)
1869 1877
1870 LOADADDR(r24, paca) /* Get base vaddr of paca array */ 1878 LOAD_REG_IMMEDIATE(r24, paca) /* Get base vaddr of paca array */
1871 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */ 1879 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */
1872 add r13,r13,r24 /* for this processor. */ 1880 add r13,r13,r24 /* for this processor. */
1873 add r13,r13,r26 /* convert to physical addr */ 1881 add r13,r13,r26 /* convert to physical addr */
@@ -1880,8 +1888,8 @@ _STATIC(start_here_multiplatform)
1880 mr r3,r31 1888 mr r3,r31
1881 bl .early_setup 1889 bl .early_setup
1882 1890
1883 LOADADDR(r3,.start_here_common) 1891 LOAD_REG_IMMEDIATE(r3, .start_here_common)
1884 SET_REG_TO_CONST(r4, MSR_KERNEL) 1892 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
1885 mtspr SPRN_SRR0,r3 1893 mtspr SPRN_SRR0,r3
1886 mtspr SPRN_SRR1,r4 1894 mtspr SPRN_SRR1,r4
1887 rfid 1895 rfid
@@ -1895,7 +1903,7 @@ _STATIC(start_here_common)
1895 /* The following code sets up the SP and TOC now that we are */ 1903 /* The following code sets up the SP and TOC now that we are */
1896 /* running with translation enabled. */ 1904 /* running with translation enabled. */
1897 1905
1898 LOADADDR(r3,init_thread_union) 1906 LOAD_REG_IMMEDIATE(r3,init_thread_union)
1899 1907
1900 /* set up the stack */ 1908 /* set up the stack */
1901 addi r1,r3,THREAD_SIZE 1909 addi r1,r3,THREAD_SIZE
@@ -1908,16 +1916,16 @@ _STATIC(start_here_common)
1908 li r3,0 1916 li r3,0
1909 bl .do_cpu_ftr_fixups 1917 bl .do_cpu_ftr_fixups
1910 1918
1911 LOADADDR(r26, boot_cpuid) 1919 LOAD_REG_IMMEDIATE(r26, boot_cpuid)
1912 lwz r26,0(r26) 1920 lwz r26,0(r26)
1913 1921
1914 LOADADDR(r24, paca) /* Get base vaddr of paca array */ 1922 LOAD_REG_IMMEDIATE(r24, paca) /* Get base vaddr of paca array */
1915 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */ 1923 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */
1916 add r13,r13,r24 /* for this processor. */ 1924 add r13,r13,r24 /* for this processor. */
1917 mtspr SPRN_SPRG3,r13 1925 mtspr SPRN_SPRG3,r13
1918 1926
1919 /* ptr to current */ 1927 /* ptr to current */
1920 LOADADDR(r4,init_task) 1928 LOAD_REG_IMMEDIATE(r4, init_task)
1921 std r4,PACACURRENT(r13) 1929 std r4,PACACURRENT(r13)
1922 1930
1923 /* Load the TOC */ 1931 /* Load the TOC */
@@ -1940,7 +1948,7 @@ _STATIC(start_here_common)
1940 1948
1941_GLOBAL(hmt_init) 1949_GLOBAL(hmt_init)
1942#ifdef CONFIG_HMT 1950#ifdef CONFIG_HMT
1943 LOADADDR(r5, hmt_thread_data) 1951 LOAD_REG_IMMEDIATE(r5, hmt_thread_data)
1944 mfspr r7,SPRN_PVR 1952 mfspr r7,SPRN_PVR
1945 srwi r7,r7,16 1953 srwi r7,r7,16
1946 cmpwi r7,0x34 /* Pulsar */ 1954 cmpwi r7,0x34 /* Pulsar */
@@ -1961,7 +1969,7 @@ _GLOBAL(hmt_init)
1961 b 101f 1969 b 101f
1962 1970
1963__hmt_secondary_hold: 1971__hmt_secondary_hold:
1964 LOADADDR(r5, hmt_thread_data) 1972 LOAD_REG_IMMEDIATE(r5, hmt_thread_data)
1965 clrldi r5,r5,4 1973 clrldi r5,r5,4
1966 li r7,0 1974 li r7,0
1967 mfspr r6,SPRN_PIR 1975 mfspr r6,SPRN_PIR
@@ -1989,7 +1997,7 @@ __hmt_secondary_hold:
1989 1997
1990#ifdef CONFIG_HMT 1998#ifdef CONFIG_HMT
1991_GLOBAL(hmt_start_secondary) 1999_GLOBAL(hmt_start_secondary)
1992 LOADADDR(r4,__hmt_secondary_hold) 2000 LOAD_REG_IMMEDIATE(r4,__hmt_secondary_hold)
1993 clrldi r4,r4,4 2001 clrldi r4,r4,4
1994 mtspr SPRN_NIADORM, r4 2002 mtspr SPRN_NIADORM, r4
1995 mfspr r4, SPRN_MSRDORM 2003 mfspr r4, SPRN_MSRDORM
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 1494e2f177f7..c16b4afab582 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -38,14 +38,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
38 /* We must dynamically check for the NAP feature as it 38 /* We must dynamically check for the NAP feature as it
39 * can be cleared by CPU init after the fixups are done 39 * can be cleared by CPU init after the fixups are done
40 */ 40 */
41 LOADBASE(r3,cur_cpu_spec) 41 LOAD_REG_ADDRBASE(r3,cur_cpu_spec)
42 ld r4,OFF(cur_cpu_spec)(r3) 42 ld r4,ADDROFF(cur_cpu_spec)(r3)
43 ld r4,CPU_SPEC_FEATURES(r4) 43 ld r4,CPU_SPEC_FEATURES(r4)
44 andi. r0,r4,CPU_FTR_CAN_NAP 44 andi. r0,r4,CPU_FTR_CAN_NAP
45 beqlr 45 beqlr
46 /* Now check if user or arch enabled NAP mode */ 46 /* Now check if user or arch enabled NAP mode */
47 LOADBASE(r3,powersave_nap) 47 LOAD_REG_ADDRBASE(r3,powersave_nap)
48 lwz r4,OFF(powersave_nap)(r3) 48 lwz r4,ADDROFF(powersave_nap)(r3)
49 cmpwi 0,r4,0 49 cmpwi 0,r4,0
50 beqlr 50 beqlr
51 51
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5651032d8706..d1fffce86df9 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -238,14 +238,10 @@ void do_IRQ(struct pt_regs *regs)
238 irq_exit(); 238 irq_exit();
239 239
240#ifdef CONFIG_PPC_ISERIES 240#ifdef CONFIG_PPC_ISERIES
241 { 241 if (get_lppaca()->int_dword.fields.decr_int) {
242 struct paca_struct *lpaca = get_paca(); 242 get_lppaca()->int_dword.fields.decr_int = 0;
243 243 /* Signal a fake decrementer interrupt */
244 if (lpaca->lppaca.int_dword.fields.decr_int) { 244 timer_interrupt(regs);
245 lpaca->lppaca.int_dword.fields.decr_int = 0;
246 /* Signal a fake decrementer interrupt */
247 timer_interrupt(regs);
248 }
249 } 245 }
250#endif 246#endif
251} 247}
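The hunk above goes through a get_lppaca() accessor instead of the old embedded lpaca->lppaca field. A minimal sketch of the shape that accessor is assumed to take in this series (illustrative only, not part of the diff):

	/* The paca now carries a pointer into the separate lppaca[] array
	 * instead of embedding the structure directly.
	 */
	struct paca_struct {
		struct lppaca *lppaca_ptr;	/* replaces struct lppaca lppaca */
		/* ... remaining paca fields ... */
	};

	/* get_paca() already exists; get_lppaca() just chases the new pointer. */
	#define get_lppaca()	(get_paca()->lppaca_ptr)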
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 9dda16ccde78..1ae96a8ed7e2 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -55,15 +55,13 @@ static unsigned long get_purr(void)
55{ 55{
56 unsigned long sum_purr = 0; 56 unsigned long sum_purr = 0;
57 int cpu; 57 int cpu;
58 struct paca_struct *lpaca;
59 58
60 for_each_cpu(cpu) { 59 for_each_cpu(cpu) {
61 lpaca = paca + cpu; 60 sum_purr += lppaca[cpu].emulated_time_base;
62 sum_purr += lpaca->lppaca.emulated_time_base;
63 61
64#ifdef PURR_DEBUG 62#ifdef PURR_DEBUG
65 printk(KERN_INFO "get_purr for cpu (%d) has value (%ld) \n", 63 printk(KERN_INFO "get_purr for cpu (%d) has value (%ld) \n",
66 cpu, lpaca->lppaca.emulated_time_base); 64 cpu, lppaca[cpu].emulated_time_base);
67#endif 65#endif
68 } 66 }
69 return sum_purr; 67 return sum_purr;
@@ -79,12 +77,11 @@ static int lparcfg_data(struct seq_file *m, void *v)
79 unsigned long pool_id, lp_index; 77 unsigned long pool_id, lp_index;
80 int shared, entitled_capacity, max_entitled_capacity; 78 int shared, entitled_capacity, max_entitled_capacity;
81 int processors, max_processors; 79 int processors, max_processors;
82 struct paca_struct *lpaca = get_paca();
83 unsigned long purr = get_purr(); 80 unsigned long purr = get_purr();
84 81
85 seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS); 82 seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS);
86 83
87 shared = (int)(lpaca->lppaca_ptr->shared_proc); 84 shared = (int)(get_lppaca()->shared_proc);
88 seq_printf(m, "serial_number=%c%c%c%c%c%c%c\n", 85 seq_printf(m, "serial_number=%c%c%c%c%c%c%c\n",
89 e2a(xItExtVpdPanel.mfgID[2]), 86 e2a(xItExtVpdPanel.mfgID[2]),
90 e2a(xItExtVpdPanel.mfgID[3]), 87 e2a(xItExtVpdPanel.mfgID[3]),
@@ -402,7 +399,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
402 (h_resource >> 0 * 8) & 0xffff); 399 (h_resource >> 0 * 8) & 0xffff);
403 400
 404 /* pool related entries are appropriate for shared configs */ 401 /* pool related entries are appropriate for shared configs */
405 if (paca[0].lppaca.shared_proc) { 402 if (lppaca[0].shared_proc) {
406 403
407 h_pic(&pool_idle_time, &pool_procs); 404 h_pic(&pool_idle_time, &pool_procs);
408 405
@@ -451,7 +448,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
451 seq_printf(m, "partition_potential_processors=%d\n", 448 seq_printf(m, "partition_potential_processors=%d\n",
452 partition_potential_processors); 449 partition_potential_processors);
453 450
454 seq_printf(m, "shared_processor_mode=%d\n", paca[0].lppaca.shared_proc); 451 seq_printf(m, "shared_processor_mode=%d\n", lppaca[0].shared_proc);
455 452
456 return 0; 453 return 0;
457} 454}
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 01d0d97a16e1..be982023409e 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -68,7 +68,7 @@ _GLOBAL(reloc_offset)
68 mflr r0 68 mflr r0
69 bl 1f 69 bl 1f
701: mflr r3 701: mflr r3
71 LOADADDR(r4,1b) 71 LOAD_REG_IMMEDIATE(r4,1b)
72 subf r3,r4,r3 72 subf r3,r4,r3
73 mtlr r0 73 mtlr r0
74 blr 74 blr
@@ -80,7 +80,7 @@ _GLOBAL(add_reloc_offset)
80 mflr r0 80 mflr r0
81 bl 1f 81 bl 1f
821: mflr r5 821: mflr r5
83 LOADADDR(r4,1b) 83 LOAD_REG_IMMEDIATE(r4,1b)
84 subf r5,r4,r5 84 subf r5,r4,r5
85 add r3,r3,r5 85 add r3,r3,r5
86 mtlr r0 86 mtlr r0
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index ae48a002f81a..2778cce058e2 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -39,7 +39,7 @@ _GLOBAL(reloc_offset)
39 mflr r0 39 mflr r0
40 bl 1f 40 bl 1f
411: mflr r3 411: mflr r3
42 LOADADDR(r4,1b) 42 LOAD_REG_IMMEDIATE(r4,1b)
43 subf r3,r4,r3 43 subf r3,r4,r3
44 mtlr r0 44 mtlr r0
45 blr 45 blr
@@ -51,7 +51,7 @@ _GLOBAL(add_reloc_offset)
51 mflr r0 51 mflr r0
52 bl 1f 52 bl 1f
531: mflr r5 531: mflr r5
54 LOADADDR(r4,1b) 54 LOAD_REG_IMMEDIATE(r4,1b)
55 subf r5,r4,r5 55 subf r5,r4,r5
56 add r3,r3,r5 56 add r3,r3,r5
57 mtlr r0 57 mtlr r0
@@ -498,15 +498,15 @@ _GLOBAL(identify_cpu)
498 */ 498 */
499_GLOBAL(do_cpu_ftr_fixups) 499_GLOBAL(do_cpu_ftr_fixups)
500 /* Get CPU 0 features */ 500 /* Get CPU 0 features */
501 LOADADDR(r6,cur_cpu_spec) 501 LOAD_REG_IMMEDIATE(r6,cur_cpu_spec)
502 sub r6,r6,r3 502 sub r6,r6,r3
503 ld r4,0(r6) 503 ld r4,0(r6)
504 sub r4,r4,r3 504 sub r4,r4,r3
505 ld r4,CPU_SPEC_FEATURES(r4) 505 ld r4,CPU_SPEC_FEATURES(r4)
506 /* Get the fixup table */ 506 /* Get the fixup table */
507 LOADADDR(r6,__start___ftr_fixup) 507 LOAD_REG_IMMEDIATE(r6,__start___ftr_fixup)
508 sub r6,r6,r3 508 sub r6,r6,r3
509 LOADADDR(r7,__stop___ftr_fixup) 509 LOAD_REG_IMMEDIATE(r7,__stop___ftr_fixup)
510 sub r7,r7,r3 510 sub r7,r7,r3
511 /* Do the fixup */ 511 /* Do the fixup */
5121: cmpld r6,r7 5121: cmpld r6,r7
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index 7065e40e2f42..22d83d4d1af5 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -132,6 +132,8 @@ static int of_device_resume(struct device * dev)
132struct bus_type of_platform_bus_type = { 132struct bus_type of_platform_bus_type = {
133 .name = "of_platform", 133 .name = "of_platform",
134 .match = of_platform_bus_match, 134 .match = of_platform_bus_match,
135 .probe = of_device_probe,
136 .remove = of_device_remove,
135 .suspend = of_device_suspend, 137 .suspend = of_device_suspend,
136 .resume = of_device_resume, 138 .resume = of_device_resume,
137}; 139};
@@ -150,8 +152,6 @@ int of_register_driver(struct of_platform_driver *drv)
150 /* initialize common driver fields */ 152 /* initialize common driver fields */
151 drv->driver.name = drv->name; 153 drv->driver.name = drv->name;
152 drv->driver.bus = &of_platform_bus_type; 154 drv->driver.bus = &of_platform_bus_type;
153 drv->driver.probe = of_device_probe;
154 drv->driver.remove = of_device_remove;
155 155
156 /* register with core */ 156 /* register with core */
157 count = driver_register(&drv->driver); 157 count = driver_register(&drv->driver);
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 999bdd816769..5d1b708086bd 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -25,6 +25,28 @@
25 * field correctly */ 25 * field correctly */
26extern unsigned long __toc_start; 26extern unsigned long __toc_start;
27 27
28/*
29 * iSeries structure which the hypervisor knows about - this structure
30 * should not cross a page boundary. The vpa_init/register_vpa call
31 * is now known to fail if the lppaca structure crosses a page
32 * boundary. The lppaca is also used on POWER5 pSeries boxes. The
33 * lppaca is 640 bytes long, and cannot readily change since the
34 * hypervisor knows its layout, so a 1kB alignment will suffice to
35 * ensure that it doesn't cross a page boundary.
36 */
37struct lppaca lppaca[] = {
38 [0 ... (NR_CPUS-1)] = {
39 .desc = 0xd397d781, /* "LpPa" */
40 .size = sizeof(struct lppaca),
41 .dyn_proc_status = 2,
42 .decr_val = 0x00ff0000,
43 .fpregs_in_use = 1,
44 .end_of_quantum = 0xfffffffffffffffful,
45 .slb_count = 64,
46 .vmxregs_in_use = 0,
47 },
48};
49
28/* The Paca is an array with one entry per processor. Each contains an 50/* The Paca is an array with one entry per processor. Each contains an
29 * lppaca, which contains the information shared between the 51 * lppaca, which contains the information shared between the
30 * hypervisor and Linux. 52 * hypervisor and Linux.
@@ -35,27 +57,17 @@ extern unsigned long __toc_start;
35 * processor (not thread). 57 * processor (not thread).
36 */ 58 */
37#define PACA_INIT_COMMON(number, start, asrr, asrv) \ 59#define PACA_INIT_COMMON(number, start, asrr, asrv) \
60 .lppaca_ptr = &lppaca[number], \
38 .lock_token = 0x8000, \ 61 .lock_token = 0x8000, \
39 .paca_index = (number), /* Paca Index */ \ 62 .paca_index = (number), /* Paca Index */ \
40 .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \ 63 .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \
41 .stab_real = (asrr), /* Real pointer to segment table */ \ 64 .stab_real = (asrr), /* Real pointer to segment table */ \
42 .stab_addr = (asrv), /* Virt pointer to segment table */ \ 65 .stab_addr = (asrv), /* Virt pointer to segment table */ \
43 .cpu_start = (start), /* Processor start */ \ 66 .cpu_start = (start), /* Processor start */ \
44 .hw_cpu_id = 0xffff, \ 67 .hw_cpu_id = 0xffff,
45 .lppaca = { \
46 .desc = 0xd397d781, /* "LpPa" */ \
47 .size = sizeof(struct lppaca), \
48 .dyn_proc_status = 2, \
49 .decr_val = 0x00ff0000, \
50 .fpregs_in_use = 1, \
51 .end_of_quantum = 0xfffffffffffffffful, \
52 .slb_count = 64, \
53 .vmxregs_in_use = 0, \
54 }, \
55 68
56#ifdef CONFIG_PPC_ISERIES 69#ifdef CONFIG_PPC_ISERIES
57#define PACA_INIT_ISERIES(number) \ 70#define PACA_INIT_ISERIES(number) \
58 .lppaca_ptr = &paca[number].lppaca, \
59 .reg_save_ptr = &iseries_reg_save[number], 71 .reg_save_ptr = &iseries_reg_save[number],
60 72
61#define PACA_INIT(number) \ 73#define PACA_INIT(number) \
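The net effect of the paca.c change above is that the hypervisor-shared lppaca entries now live in their own statically initialised lppaca[] array, with each paca wired to its slot at build time. A sketch of the resulting invariant (the helper name cpu_lppaca() is hypothetical, for illustration only):

	/* PACA_INIT_COMMON sets paca[cpu].lppaca_ptr = &lppaca[cpu], so either
	 * expression below names the same hypervisor-visible area.
	 */
	static inline struct lppaca *cpu_lppaca(int cpu)
	{
		return paca[cpu].lppaca_ptr;	/* == &lppaca[cpu] */
	}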
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
new file mode 100644
index 000000000000..704c846b2b0f
--- /dev/null
+++ b/arch/powerpc/kernel/pci_32.c
@@ -0,0 +1,1897 @@
1/*
2 * Common pmac/prep/chrp pci routines. -- Cort
3 */
4
5#include <linux/config.h>
6#include <linux/kernel.h>
7#include <linux/pci.h>
8#include <linux/delay.h>
9#include <linux/string.h>
10#include <linux/init.h>
11#include <linux/capability.h>
12#include <linux/sched.h>
13#include <linux/errno.h>
14#include <linux/bootmem.h>
15
16#include <asm/processor.h>
17#include <asm/io.h>
18#include <asm/prom.h>
19#include <asm/sections.h>
20#include <asm/pci-bridge.h>
21#include <asm/byteorder.h>
22#include <asm/irq.h>
23#include <asm/uaccess.h>
24#include <asm/machdep.h>
25
26#undef DEBUG
27
28#ifdef DEBUG
29#define DBG(x...) printk(x)
30#else
31#define DBG(x...)
32#endif
33
34unsigned long isa_io_base = 0;
35unsigned long isa_mem_base = 0;
36unsigned long pci_dram_offset = 0;
37int pcibios_assign_bus_offset = 1;
38
39void pcibios_make_OF_bus_map(void);
40
41static int pci_relocate_bridge_resource(struct pci_bus *bus, int i);
42static int probe_resource(struct pci_bus *parent, struct resource *pr,
43 struct resource *res, struct resource **conflict);
44static void update_bridge_base(struct pci_bus *bus, int i);
45static void pcibios_fixup_resources(struct pci_dev* dev);
46static void fixup_broken_pcnet32(struct pci_dev* dev);
47static int reparent_resources(struct resource *parent, struct resource *res);
48static void fixup_cpc710_pci64(struct pci_dev* dev);
49#ifdef CONFIG_PPC_OF
50static u8* pci_to_OF_bus_map;
51#endif
52
53/* By default, we don't re-assign bus numbers. We do this only on
54 * some pmacs
55 */
56int pci_assign_all_buses;
57
58struct pci_controller* hose_head;
59struct pci_controller** hose_tail = &hose_head;
60
61static int pci_bus_count;
62
63static void
64fixup_broken_pcnet32(struct pci_dev* dev)
65{
66 if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
67 dev->vendor = PCI_VENDOR_ID_AMD;
68 pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
69 }
70}
71DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
72
73static void
74fixup_cpc710_pci64(struct pci_dev* dev)
75{
76 /* Hide the PCI64 BARs from the kernel as their content doesn't
77 * fit well in the resource management
78 */
79 dev->resource[0].start = dev->resource[0].end = 0;
80 dev->resource[0].flags = 0;
81 dev->resource[1].start = dev->resource[1].end = 0;
82 dev->resource[1].flags = 0;
83}
84DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64);
85
86static void
87pcibios_fixup_resources(struct pci_dev *dev)
88{
89 struct pci_controller* hose = (struct pci_controller *)dev->sysdata;
90 int i;
91 unsigned long offset;
92
93 if (!hose) {
94 printk(KERN_ERR "No hose for PCI dev %s!\n", pci_name(dev));
95 return;
96 }
97 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
98 struct resource *res = dev->resource + i;
99 if (!res->flags)
100 continue;
101 if (res->end == 0xffffffff) {
102 DBG("PCI:%s Resource %d [%08lx-%08lx] is unassigned\n",
103 pci_name(dev), i, res->start, res->end);
104 res->end -= res->start;
105 res->start = 0;
106 res->flags |= IORESOURCE_UNSET;
107 continue;
108 }
109 offset = 0;
110 if (res->flags & IORESOURCE_MEM) {
111 offset = hose->pci_mem_offset;
112 } else if (res->flags & IORESOURCE_IO) {
113 offset = (unsigned long) hose->io_base_virt
114 - isa_io_base;
115 }
116 if (offset != 0) {
117 res->start += offset;
118 res->end += offset;
119#ifdef DEBUG
120 printk("Fixup res %d (%lx) of dev %s: %lx -> %lx\n",
121 i, res->flags, pci_name(dev),
122 res->start - offset, res->start);
123#endif
124 }
125 }
126
127 /* Call machine specific resource fixup */
128 if (ppc_md.pcibios_fixup_resources)
129 ppc_md.pcibios_fixup_resources(dev);
130}
131DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
132
133void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
134 struct resource *res)
135{
136 unsigned long offset = 0;
137 struct pci_controller *hose = dev->sysdata;
138
139 if (hose && res->flags & IORESOURCE_IO)
140 offset = (unsigned long)hose->io_base_virt - isa_io_base;
141 else if (hose && res->flags & IORESOURCE_MEM)
142 offset = hose->pci_mem_offset;
143 region->start = res->start - offset;
144 region->end = res->end - offset;
145}
146EXPORT_SYMBOL(pcibios_resource_to_bus);
147
148void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
149 struct pci_bus_region *region)
150{
151 unsigned long offset = 0;
152 struct pci_controller *hose = dev->sysdata;
153
154 if (hose && res->flags & IORESOURCE_IO)
155 offset = (unsigned long)hose->io_base_virt - isa_io_base;
156 else if (hose && res->flags & IORESOURCE_MEM)
157 offset = hose->pci_mem_offset;
158 res->start = region->start + offset;
159 res->end = region->end + offset;
160}
161EXPORT_SYMBOL(pcibios_bus_to_resource);
162
163/*
164 * We need to avoid collisions with `mirrored' VGA ports
165 * and other strange ISA hardware, so we always want the
166 * addresses to be allocated in the 0x000-0x0ff region
167 * modulo 0x400.
168 *
169 * Why? Because some silly external IO cards only decode
170 * the low 10 bits of the IO address. The 0x00-0xff region
171 * is reserved for motherboard devices that decode all 16
172 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
173 * but we want to try to avoid allocating at 0x2900-0x2bff
174 * which might have be mirrored at 0x0100-0x03ff..
175 */
176void pcibios_align_resource(void *data, struct resource *res, unsigned long size,
177 unsigned long align)
178{
179 struct pci_dev *dev = data;
180
181 if (res->flags & IORESOURCE_IO) {
182 unsigned long start = res->start;
183
184 if (size > 0x100) {
185 printk(KERN_ERR "PCI: I/O Region %s/%d too large"
186 " (%ld bytes)\n", pci_name(dev),
187 dev->resource - res, size);
188 }
189
190 if (start & 0x300) {
191 start = (start + 0x3ff) & ~0x3ff;
192 res->start = start;
193 }
194 }
195}
196EXPORT_SYMBOL(pcibios_align_resource);
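	/* Worked example of the rounding above (illustrative values, not part
	 * of this file): a start of 0x2900 falls in a window that 10-bit
	 * decoders may mirror, so it gets bumped to the next 0x400 boundary.
	 */
	unsigned long start = 0x2900;		/* 0x2900 & 0x300 == 0x100 */
	if (start & 0x300)
		start = (start + 0x3ff) & ~0x3ff;
	/* start is now 0x2c00: its low 10 bits land back in 0x000-0x0ff. */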
197
198/*
199 * Handle resources of PCI devices. If the world were perfect, we could
200 * just allocate all the resource regions and do nothing more. It isn't.
201 * On the other hand, we cannot just re-allocate all devices, as it would
202 * require us to know lots of host bridge internals. So we attempt to
203 * keep as much of the original configuration as possible, but tweak it
204 * when it's found to be wrong.
205 *
206 * Known BIOS problems we have to work around:
207 * - I/O or memory regions not configured
208 * - regions configured, but not enabled in the command register
209 * - bogus I/O addresses above 64K used
210 * - expansion ROMs left enabled (this may sound harmless, but given
211 * the fact the PCI specs explicitly allow address decoders to be
212 * shared between expansion ROMs and other resource regions, it's
213 * at least dangerous)
214 *
215 * Our solution:
216 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
217 * This gives us fixed barriers on where we can allocate.
218 * (2) Allocate resources for all enabled devices. If there is
219 * a collision, just mark the resource as unallocated. Also
220 * disable expansion ROMs during this step.
221 * (3) Try to allocate resources for disabled devices. If the
222 * resources were assigned correctly, everything goes well,
223 * if they weren't, they won't disturb allocation of other
224 * resources.
225 * (4) Assign new addresses to resources which were either
226 * not configured at all or misconfigured. If explicitly
227 * requested by the user, configure expansion ROM address
228 * as well.
229 */
230
231static void __init
232pcibios_allocate_bus_resources(struct list_head *bus_list)
233{
234 struct pci_bus *bus;
235 int i;
236 struct resource *res, *pr;
237
238 /* Depth-First Search on bus tree */
239 list_for_each_entry(bus, bus_list, node) {
240 for (i = 0; i < 4; ++i) {
241 if ((res = bus->resource[i]) == NULL || !res->flags
242 || res->start > res->end)
243 continue;
244 if (bus->parent == NULL)
245 pr = (res->flags & IORESOURCE_IO)?
246 &ioport_resource: &iomem_resource;
247 else {
248 pr = pci_find_parent_resource(bus->self, res);
249 if (pr == res) {
250 /* this happens when the generic PCI
251 * code (wrongly) decides that this
252 * bridge is transparent -- paulus
253 */
254 continue;
255 }
256 }
257
258 DBG("PCI: bridge rsrc %lx..%lx (%lx), parent %p\n",
259 res->start, res->end, res->flags, pr);
260 if (pr) {
261 if (request_resource(pr, res) == 0)
262 continue;
263 /*
264 * Must be a conflict with an existing entry.
265 * Move that entry (or entries) under the
266 * bridge resource and try again.
267 */
268 if (reparent_resources(pr, res) == 0)
269 continue;
270 }
271 printk(KERN_ERR "PCI: Cannot allocate resource region "
272 "%d of PCI bridge %d\n", i, bus->number);
273 if (pci_relocate_bridge_resource(bus, i))
274 bus->resource[i] = NULL;
275 }
276 pcibios_allocate_bus_resources(&bus->children);
277 }
278}
279
280/*
281 * Reparent resource children of pr that conflict with res
282 * under res, and make res replace those children.
283 */
284static int __init
285reparent_resources(struct resource *parent, struct resource *res)
286{
287 struct resource *p, **pp;
288 struct resource **firstpp = NULL;
289
290 for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
291 if (p->end < res->start)
292 continue;
293 if (res->end < p->start)
294 break;
295 if (p->start < res->start || p->end > res->end)
296 return -1; /* not completely contained */
297 if (firstpp == NULL)
298 firstpp = pp;
299 }
300 if (firstpp == NULL)
301 return -1; /* didn't find any conflicting entries? */
302 res->parent = parent;
303 res->child = *firstpp;
304 res->sibling = *pp;
305 *firstpp = res;
306 *pp = NULL;
307 for (p = res->child; p != NULL; p = p->sibling) {
308 p->parent = res;
309 DBG(KERN_INFO "PCI: reparented %s [%lx..%lx] under %s\n",
310 p->name, p->start, p->end, res->name);
311 }
312 return 0;
313}
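	/* Descriptive note (no code change): after reparent_resources(), the
	 * children of "parent" that conflicted with "res" hang off res->child
	 * instead, and res has taken their place in parent's child list, so
	 * the resource tree gains a level rather than rejecting the bridge
	 * window.
	 */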
314
315/*
316 * A bridge has been allocated a range which is outside the range
317 * of its parent bridge, so it needs to be moved.
318 */
319static int __init
320pci_relocate_bridge_resource(struct pci_bus *bus, int i)
321{
322 struct resource *res, *pr, *conflict;
323 unsigned long try, size;
324 int j;
325 struct pci_bus *parent = bus->parent;
326
327 if (parent == NULL) {
328 /* shouldn't ever happen */
329 printk(KERN_ERR "PCI: can't move host bridge resource\n");
330 return -1;
331 }
332 res = bus->resource[i];
333 if (res == NULL)
334 return -1;
335 pr = NULL;
336 for (j = 0; j < 4; j++) {
337 struct resource *r = parent->resource[j];
338 if (!r)
339 continue;
340 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
341 continue;
342 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) {
343 pr = r;
344 break;
345 }
346 if (res->flags & IORESOURCE_PREFETCH)
347 pr = r;
348 }
349 if (pr == NULL)
350 return -1;
351 size = res->end - res->start;
352 if (pr->start > pr->end || size > pr->end - pr->start)
353 return -1;
354 try = pr->end;
355 for (;;) {
356 res->start = try - size;
357 res->end = try;
358 if (probe_resource(bus->parent, pr, res, &conflict) == 0)
359 break;
360 if (conflict->start <= pr->start + size)
361 return -1;
362 try = conflict->start - 1;
363 }
364 if (request_resource(pr, res)) {
365 DBG(KERN_ERR "PCI: huh? couldn't move to %lx..%lx\n",
366 res->start, res->end);
367 return -1; /* "can't happen" */
368 }
369 update_bridge_base(bus, i);
370 printk(KERN_INFO "PCI: bridge %d resource %d moved to %lx..%lx\n",
371 bus->number, i, res->start, res->end);
372 return 0;
373}
374
375static int __init
376probe_resource(struct pci_bus *parent, struct resource *pr,
377 struct resource *res, struct resource **conflict)
378{
379 struct pci_bus *bus;
380 struct pci_dev *dev;
381 struct resource *r;
382 int i;
383
384 for (r = pr->child; r != NULL; r = r->sibling) {
385 if (r->end >= res->start && res->end >= r->start) {
386 *conflict = r;
387 return 1;
388 }
389 }
390 list_for_each_entry(bus, &parent->children, node) {
391 for (i = 0; i < 4; ++i) {
392 if ((r = bus->resource[i]) == NULL)
393 continue;
394 if (!r->flags || r->start > r->end || r == res)
395 continue;
396 if (pci_find_parent_resource(bus->self, r) != pr)
397 continue;
398 if (r->end >= res->start && res->end >= r->start) {
399 *conflict = r;
400 return 1;
401 }
402 }
403 }
404 list_for_each_entry(dev, &parent->devices, bus_list) {
405 for (i = 0; i < 6; ++i) {
406 r = &dev->resource[i];
407 if (!r->flags || (r->flags & IORESOURCE_UNSET))
408 continue;
409 if (pci_find_parent_resource(dev, r) != pr)
410 continue;
411 if (r->end >= res->start && res->end >= r->start) {
412 *conflict = r;
413 return 1;
414 }
415 }
416 }
417 return 0;
418}
419
420static void __init
421update_bridge_base(struct pci_bus *bus, int i)
422{
423 struct resource *res = bus->resource[i];
424 u8 io_base_lo, io_limit_lo;
425 u16 mem_base, mem_limit;
426 u16 cmd;
427 unsigned long start, end, off;
428 struct pci_dev *dev = bus->self;
429 struct pci_controller *hose = dev->sysdata;
430
431 if (!hose) {
432 printk("update_bridge_base: no hose?\n");
433 return;
434 }
435 pci_read_config_word(dev, PCI_COMMAND, &cmd);
436 pci_write_config_word(dev, PCI_COMMAND,
437 cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY));
438 if (res->flags & IORESOURCE_IO) {
439 off = (unsigned long) hose->io_base_virt - isa_io_base;
440 start = res->start - off;
441 end = res->end - off;
442 io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK;
443 io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK;
444 if (end > 0xffff) {
445 pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
446 start >> 16);
447 pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
448 end >> 16);
449 io_base_lo |= PCI_IO_RANGE_TYPE_32;
450 } else
451 io_base_lo |= PCI_IO_RANGE_TYPE_16;
452 pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo);
453 pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo);
454
455 } else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
456 == IORESOURCE_MEM) {
457 off = hose->pci_mem_offset;
458 mem_base = ((res->start - off) >> 16) & PCI_MEMORY_RANGE_MASK;
459 mem_limit = ((res->end - off) >> 16) & PCI_MEMORY_RANGE_MASK;
460 pci_write_config_word(dev, PCI_MEMORY_BASE, mem_base);
461 pci_write_config_word(dev, PCI_MEMORY_LIMIT, mem_limit);
462
463 } else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
464 == (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
465 off = hose->pci_mem_offset;
466 mem_base = ((res->start - off) >> 16) & PCI_PREF_RANGE_MASK;
467 mem_limit = ((res->end - off) >> 16) & PCI_PREF_RANGE_MASK;
468 pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, mem_base);
469 pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, mem_limit);
470
471 } else {
472 DBG(KERN_ERR "PCI: ugh, bridge %s res %d has flags=%lx\n",
473 pci_name(dev), i, res->flags);
474 }
475 pci_write_config_word(dev, PCI_COMMAND, cmd);
476}
477
478static inline void alloc_resource(struct pci_dev *dev, int idx)
479{
480 struct resource *pr, *r = &dev->resource[idx];
481
482 DBG("PCI:%s: Resource %d: %08lx-%08lx (f=%lx)\n",
483 pci_name(dev), idx, r->start, r->end, r->flags);
484 pr = pci_find_parent_resource(dev, r);
485 if (!pr || request_resource(pr, r) < 0) {
486 printk(KERN_ERR "PCI: Cannot allocate resource region %d"
487 " of device %s\n", idx, pci_name(dev));
488 if (pr)
489 DBG("PCI: parent is %p: %08lx-%08lx (f=%lx)\n",
490 pr, pr->start, pr->end, pr->flags);
491 /* We'll assign a new address later */
492 r->flags |= IORESOURCE_UNSET;
493 r->end -= r->start;
494 r->start = 0;
495 }
496}
497
498static void __init
499pcibios_allocate_resources(int pass)
500{
501 struct pci_dev *dev = NULL;
502 int idx, disabled;
503 u16 command;
504 struct resource *r;
505
506 for_each_pci_dev(dev) {
507 pci_read_config_word(dev, PCI_COMMAND, &command);
508 for (idx = 0; idx < 6; idx++) {
509 r = &dev->resource[idx];
510 if (r->parent) /* Already allocated */
511 continue;
512 if (!r->flags || (r->flags & IORESOURCE_UNSET))
513 continue; /* Not assigned at all */
514 if (r->flags & IORESOURCE_IO)
515 disabled = !(command & PCI_COMMAND_IO);
516 else
517 disabled = !(command & PCI_COMMAND_MEMORY);
518 if (pass == disabled)
519 alloc_resource(dev, idx);
520 }
521 if (pass)
522 continue;
523 r = &dev->resource[PCI_ROM_RESOURCE];
524 if (r->flags & IORESOURCE_ROM_ENABLE) {
525 /* Turn the ROM off, leave the resource region, but keep it unregistered. */
526 u32 reg;
527 DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
528 r->flags &= ~IORESOURCE_ROM_ENABLE;
529 pci_read_config_dword(dev, dev->rom_base_reg, &reg);
530 pci_write_config_dword(dev, dev->rom_base_reg,
531 reg & ~PCI_ROM_ADDRESS_ENABLE);
532 }
533 }
534}
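	/* Note on the two-pass logic above (descriptive only, no code change):
	 * pass 0 allocates resources whose decoding is already enabled in the
	 * command register (disabled == 0), pass 1 picks up the currently
	 * disabled ones, so "pass == disabled" selects exactly the set each
	 * step of the strategy comment is meant to handle.
	 */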
535
536static void __init
537pcibios_assign_resources(void)
538{
539 struct pci_dev *dev = NULL;
540 int idx;
541 struct resource *r;
542
543 for_each_pci_dev(dev) {
544 int class = dev->class >> 8;
545
546 /* Don't touch classless devices and host bridges */
547 if (!class || class == PCI_CLASS_BRIDGE_HOST)
548 continue;
549
550 for (idx = 0; idx < 6; idx++) {
551 r = &dev->resource[idx];
552
553 /*
554 * We shall assign a new address to this resource,
555 * either because the BIOS (sic) forgot to do so
556 * or because we have decided the old address was
557 * unusable for some reason.
558 */
559 if ((r->flags & IORESOURCE_UNSET) && r->end &&
560 (!ppc_md.pcibios_enable_device_hook ||
561 !ppc_md.pcibios_enable_device_hook(dev, 1))) {
562 r->flags &= ~IORESOURCE_UNSET;
563 pci_assign_resource(dev, idx);
564 }
565 }
566
567#if 0 /* don't assign ROMs */
568 r = &dev->resource[PCI_ROM_RESOURCE];
569 r->end -= r->start;
570 r->start = 0;
571 if (r->end)
572 pci_assign_resource(dev, PCI_ROM_RESOURCE);
573#endif
574 }
575}
576
577
578int
579pcibios_enable_resources(struct pci_dev *dev, int mask)
580{
581 u16 cmd, old_cmd;
582 int idx;
583 struct resource *r;
584
585 pci_read_config_word(dev, PCI_COMMAND, &cmd);
586 old_cmd = cmd;
587 for (idx=0; idx<6; idx++) {
588 /* Only set up the requested stuff */
589 if (!(mask & (1<<idx)))
590 continue;
591
592 r = &dev->resource[idx];
593 if (r->flags & IORESOURCE_UNSET) {
594 printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
595 return -EINVAL;
596 }
597 if (r->flags & IORESOURCE_IO)
598 cmd |= PCI_COMMAND_IO;
599 if (r->flags & IORESOURCE_MEM)
600 cmd |= PCI_COMMAND_MEMORY;
601 }
602 if (dev->resource[PCI_ROM_RESOURCE].start)
603 cmd |= PCI_COMMAND_MEMORY;
604 if (cmd != old_cmd) {
605 printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
606 pci_write_config_word(dev, PCI_COMMAND, cmd);
607 }
608 return 0;
609}
610
611static int next_controller_index;
612
613struct pci_controller * __init
614pcibios_alloc_controller(void)
615{
616 struct pci_controller *hose;
617
618 hose = (struct pci_controller *)alloc_bootmem(sizeof(*hose));
619 memset(hose, 0, sizeof(struct pci_controller));
620
621 *hose_tail = hose;
622 hose_tail = &hose->next;
623
624 hose->index = next_controller_index++;
625
626 return hose;
627}
628
629#ifdef CONFIG_PPC_OF
630/*
631 * Functions below are used on OpenFirmware machines.
632 */
633static void
634make_one_node_map(struct device_node* node, u8 pci_bus)
635{
636 int *bus_range;
637 int len;
638
639 if (pci_bus >= pci_bus_count)
640 return;
641 bus_range = (int *) get_property(node, "bus-range", &len);
642 if (bus_range == NULL || len < 2 * sizeof(int)) {
643 printk(KERN_WARNING "Can't get bus-range for %s, "
644 "assuming it starts at 0\n", node->full_name);
645 pci_to_OF_bus_map[pci_bus] = 0;
646 } else
647 pci_to_OF_bus_map[pci_bus] = bus_range[0];
648
649 for (node=node->child; node != 0;node = node->sibling) {
650 struct pci_dev* dev;
651 unsigned int *class_code, *reg;
652
653 class_code = (unsigned int *) get_property(node, "class-code", NULL);
654 if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
655 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
656 continue;
657 reg = (unsigned int *)get_property(node, "reg", NULL);
658 if (!reg)
659 continue;
660 dev = pci_find_slot(pci_bus, ((reg[0] >> 8) & 0xff));
661 if (!dev || !dev->subordinate)
662 continue;
663 make_one_node_map(node, dev->subordinate->number);
664 }
665}
666
667void
668pcibios_make_OF_bus_map(void)
669{
670 int i;
671 struct pci_controller* hose;
672 u8* of_prop_map;
673
674 pci_to_OF_bus_map = (u8*)kmalloc(pci_bus_count, GFP_KERNEL);
675 if (!pci_to_OF_bus_map) {
676 printk(KERN_ERR "Can't allocate OF bus map !\n");
677 return;
678 }
679
680 /* We fill the bus map with invalid values, that helps
681 * debugging.
682 */
683 for (i=0; i<pci_bus_count; i++)
684 pci_to_OF_bus_map[i] = 0xff;
685
686 /* For each hose, we begin searching bridges */
687 for(hose=hose_head; hose; hose=hose->next) {
688 struct device_node* node;
689 node = (struct device_node *)hose->arch_data;
690 if (!node)
691 continue;
692 make_one_node_map(node, hose->first_busno);
693 }
694 of_prop_map = get_property(find_path_device("/"), "pci-OF-bus-map", NULL);
695 if (of_prop_map)
696 memcpy(of_prop_map, pci_to_OF_bus_map, pci_bus_count);
697#ifdef DEBUG
698 printk("PCI->OF bus map:\n");
699 for (i=0; i<pci_bus_count; i++) {
700 if (pci_to_OF_bus_map[i] == 0xff)
701 continue;
702 printk("%d -> %d\n", i, pci_to_OF_bus_map[i]);
703 }
704#endif
705}
706
707typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
708
709static struct device_node*
710scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data)
711{
712 struct device_node* sub_node;
713
714 for (; node != 0;node = node->sibling) {
715 unsigned int *class_code;
716
717 if (filter(node, data))
718 return node;
719
720 /* For PCI<->PCI bridges or CardBus bridges, we go down
721 * Note: some OFs create a parent node "multifunc-device" as
722 * a fake root for all functions of a multi-function device,
723 * we go down them as well.
724 */
725 class_code = (unsigned int *) get_property(node, "class-code", NULL);
726 if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
727 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
728 strcmp(node->name, "multifunc-device"))
729 continue;
730 sub_node = scan_OF_pci_childs(node->child, filter, data);
731 if (sub_node)
732 return sub_node;
733 }
734 return NULL;
735}
736
737static int
738scan_OF_pci_childs_iterator(struct device_node* node, void* data)
739{
740 unsigned int *reg;
741 u8* fdata = (u8*)data;
742
743 reg = (unsigned int *) get_property(node, "reg", NULL);
744 if (reg && ((reg[0] >> 8) & 0xff) == fdata[1]
745 && ((reg[0] >> 16) & 0xff) == fdata[0])
746 return 1;
747 return 0;
748}
749
750static struct device_node*
751scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
752{
753 u8 filter_data[2] = {bus, dev_fn};
754
755 return scan_OF_pci_childs(node, scan_OF_pci_childs_iterator, filter_data);
756}
757
758/*
759 * Scans the OF tree for a device node matching a PCI device
760 */
761struct device_node *
762pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
763{
764 struct pci_controller *hose;
765 struct device_node *node;
766 int busnr;
767
768 if (!have_of)
769 return NULL;
770
771 /* Lookup the hose */
772 busnr = bus->number;
773 hose = pci_bus_to_hose(busnr);
774 if (!hose)
775 return NULL;
776
777 /* Check it has an OF node associated */
778 node = (struct device_node *) hose->arch_data;
779 if (!node)
780 return NULL;
781
782 /* Fixup bus number according to what OF think it is. */
783#ifdef CONFIG_PPC_PMAC
 784 /* The G5 needs a special case here. Basically, we don't remap all
785 * busses on it so we don't create the pci-OF-map. However, we do
786 * remap the AGP bus and so have to deal with it. A future better
787 * fix has to be done by making the remapping per-host and always
788 * filling the pci_to_OF map. --BenH
789 */
790 if (_machine == _MACH_Pmac && busnr >= 0xf0)
791 busnr -= 0xf0;
792 else
793#endif
794 if (pci_to_OF_bus_map)
795 busnr = pci_to_OF_bus_map[busnr];
796 if (busnr == 0xff)
797 return NULL;
798
 799 /* Now, look up the children of the hose */
800 return scan_OF_childs_for_device(node->child, busnr, devfn);
801}
802EXPORT_SYMBOL(pci_busdev_to_OF_node);
803
804struct device_node*
805pci_device_to_OF_node(struct pci_dev *dev)
806{
807 return pci_busdev_to_OF_node(dev->bus, dev->devfn);
808}
809EXPORT_SYMBOL(pci_device_to_OF_node);
810
811/* This routine is meant to be used early during boot, when the
812 * PCI bus numbers have not yet been assigned, and you need to
813 * issue PCI config cycles to an OF device.
814 * It could also be used to "fix" RTAS config cycles if you want
815 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
816 * config cycles.
817 */
818struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
819{
820 if (!have_of)
821 return NULL;
822 while(node) {
823 struct pci_controller* hose;
824 for (hose=hose_head;hose;hose=hose->next)
825 if (hose->arch_data == node)
826 return hose;
827 node=node->parent;
828 }
829 return NULL;
830}
831
832static int
833find_OF_pci_device_filter(struct device_node* node, void* data)
834{
835 return ((void *)node == data);
836}
837
838/*
839 * Returns the PCI device matching a given OF node
840 */
841int
842pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
843{
844 unsigned int *reg;
845 struct pci_controller* hose;
846 struct pci_dev* dev = NULL;
847
848 if (!have_of)
849 return -ENODEV;
850 /* Make sure it's really a PCI device */
851 hose = pci_find_hose_for_OF_device(node);
852 if (!hose || !hose->arch_data)
853 return -ENODEV;
854 if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child,
855 find_OF_pci_device_filter, (void *)node))
856 return -ENODEV;
857 reg = (unsigned int *) get_property(node, "reg", NULL);
858 if (!reg)
859 return -ENODEV;
860 *bus = (reg[0] >> 16) & 0xff;
861 *devfn = ((reg[0] >> 8) & 0xff);
862
863 /* Ok, here we need some tweak. If we have already renumbered
864 * all busses, we can't rely on the OF bus number any more.
865 * the pci_to_OF_bus_map is not enough as several PCI busses
866 * may match the same OF bus number.
867 */
868 if (!pci_to_OF_bus_map)
869 return 0;
870
871 for_each_pci_dev(dev)
872 if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
873 dev->devfn == *devfn) {
874 *bus = dev->bus->number;
875 pci_dev_put(dev);
876 return 0;
877 }
878
879 return -ENODEV;
880}
881EXPORT_SYMBOL(pci_device_from_OF_node);
882
883void __init
884pci_process_bridge_OF_ranges(struct pci_controller *hose,
885 struct device_node *dev, int primary)
886{
887 static unsigned int static_lc_ranges[256] __initdata;
888 unsigned int *dt_ranges, *lc_ranges, *ranges, *prev;
889 unsigned int size;
890 int rlen = 0, orig_rlen;
891 int memno = 0;
892 struct resource *res;
893 int np, na = prom_n_addr_cells(dev);
894 np = na + 5;
895
896 /* First we try to merge ranges to fix a problem with some pmacs
897 * that can have more than 3 ranges, fortunately using contiguous
898 * addresses -- BenH
899 */
900 dt_ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
901 if (!dt_ranges)
902 return;
903 /* Sanity check, though hopefully that never happens */
904 if (rlen > sizeof(static_lc_ranges)) {
905 printk(KERN_WARNING "OF ranges property too large !\n");
906 rlen = sizeof(static_lc_ranges);
907 }
908 lc_ranges = static_lc_ranges;
909 memcpy(lc_ranges, dt_ranges, rlen);
910 orig_rlen = rlen;
911
912 /* Let's work on a copy of the "ranges" property instead of damaging
913 * the device-tree image in memory
914 */
915 ranges = lc_ranges;
916 prev = NULL;
917 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
918 if (prev) {
919 if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
920 (prev[2] + prev[na+4]) == ranges[2] &&
921 (prev[na+2] + prev[na+4]) == ranges[na+2]) {
922 prev[na+4] += ranges[na+4];
923 ranges[0] = 0;
924 ranges += np;
925 continue;
926 }
927 }
928 prev = ranges;
929 ranges += np;
930 }
931
932 /*
933 * The ranges property is laid out as an array of elements,
934 * each of which comprises:
935 * cells 0 - 2: a PCI address
936 * cells 3 or 3+4: a CPU physical address
937 * (size depending on dev->n_addr_cells)
938 * cells 4+5 or 5+6: the size of the range
939 */
940 ranges = lc_ranges;
941 rlen = orig_rlen;
942 while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
943 res = NULL;
944 size = ranges[na+4];
945 switch ((ranges[0] >> 24) & 0x3) {
946 case 1: /* I/O space */
947 if (ranges[2] != 0)
948 break;
949 hose->io_base_phys = ranges[na+2];
950 /* limit I/O space to 16MB */
951 if (size > 0x01000000)
952 size = 0x01000000;
953 hose->io_base_virt = ioremap(ranges[na+2], size);
954 if (primary)
955 isa_io_base = (unsigned long) hose->io_base_virt;
956 res = &hose->io_resource;
957 res->flags = IORESOURCE_IO;
958 res->start = ranges[2];
959 DBG("PCI: IO 0x%lx -> 0x%lx\n",
960 res->start, res->start + size - 1);
961 break;
962 case 2: /* memory space */
963 memno = 0;
964 if (ranges[1] == 0 && ranges[2] == 0
965 && ranges[na+4] <= (16 << 20)) {
966 /* 1st 16MB, i.e. ISA memory area */
967 if (primary)
968 isa_mem_base = ranges[na+2];
969 memno = 1;
970 }
971 while (memno < 3 && hose->mem_resources[memno].flags)
972 ++memno;
973 if (memno == 0)
974 hose->pci_mem_offset = ranges[na+2] - ranges[2];
975 if (memno < 3) {
976 res = &hose->mem_resources[memno];
977 res->flags = IORESOURCE_MEM;
978 if(ranges[0] & 0x40000000)
979 res->flags |= IORESOURCE_PREFETCH;
980 res->start = ranges[na+2];
981 DBG("PCI: MEM[%d] 0x%lx -> 0x%lx\n", memno,
982 res->start, res->start + size - 1);
983 }
984 break;
985 }
986 if (res != NULL) {
987 res->name = dev->full_name;
988 res->end = res->start + size - 1;
989 res->parent = NULL;
990 res->sibling = NULL;
991 res->child = NULL;
992 }
993 ranges += np;
994 }
995}
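	/* Illustrative decode of a single "ranges" element for the layout
	 * described above, assuming na == 1 (so np == 6); the values are
	 * examples only, not taken from any real device tree.
	 */
	static const unsigned int example_io_range[6] = {
		0x01000000,	/* cell 0: (>>24) & 3 == 1  -> I/O space      */
		0x00000000,	/* cells 1-2: PCI address, must be 0 for I/O  */
		0x00000000,
		0x80000000,	/* cell 3 (na cells): CPU physical base       */
		0x00000000,	/* cells 4-5: size; 0x01000000 == 16MB of I/O */
		0x01000000,
	};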
996
997/* We create the "pci-OF-bus-map" property now so it appears in the
998 * /proc device tree
999 */
1000void __init
1001pci_create_OF_bus_map(void)
1002{
1003 struct property* of_prop;
1004
1005 of_prop = (struct property*) alloc_bootmem(sizeof(struct property) + 256);
1006 if (of_prop && find_path_device("/")) {
1007 memset(of_prop, -1, sizeof(struct property) + 256);
1008 of_prop->name = "pci-OF-bus-map";
1009 of_prop->length = 256;
1010 of_prop->value = (unsigned char *)&of_prop[1];
1011 prom_add_property(find_path_device("/"), of_prop);
1012 }
1013}
1014
1015static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
1016{
1017 struct pci_dev *pdev;
1018 struct device_node *np;
1019
1020 pdev = to_pci_dev (dev);
1021 np = pci_device_to_OF_node(pdev);
1022 if (np == NULL || np->full_name == NULL)
1023 return 0;
1024 return sprintf(buf, "%s", np->full_name);
1025}
1026static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
1027
1028#else /* CONFIG_PPC_OF */
1029void pcibios_make_OF_bus_map(void)
1030{
1031}
1032#endif /* CONFIG_PPC_OF */
1033
1034/* Add sysfs properties */
1035void pcibios_add_platform_entries(struct pci_dev *pdev)
1036{
1037#ifdef CONFIG_PPC_OF
1038 device_create_file(&pdev->dev, &dev_attr_devspec);
1039#endif /* CONFIG_PPC_OF */
1040}
1041
1042
1043#ifdef CONFIG_PPC_PMAC
1044/*
1045 * This set of routines checks for PCI<->PCI bridges that have closed
1046 * IO resources and have child devices. It tries to re-open an IO
1047 * window on them.
1048 *
1049 * This is a _temporary_ fix to workaround a problem with Apple's OF
1050 * closing IO windows on P2P bridges when the OF drivers of cards
1051 * below this bridge don't claim any IO range (typically ATI or
1052 * Adaptec).
1053 *
1054 * A more complete fix would be to use drivers/pci/setup-bus.c, which
1055 * involves a working pcibios_fixup_pbus_ranges(), some more care about
1056 * ordering when creating the host bus resources, and maybe a few more
1057 * minor tweaks
1058 */
1059
1060/* Initialize bridges with base/limit values we have collected */
1061static void __init
1062do_update_p2p_io_resource(struct pci_bus *bus, int enable_vga)
1063{
1064 struct pci_dev *bridge = bus->self;
1065 struct pci_controller* hose = (struct pci_controller *)bridge->sysdata;
1066 u32 l;
1067 u16 w;
1068 struct resource res;
1069
1070 if (bus->resource[0] == NULL)
1071 return;
1072 res = *(bus->resource[0]);
1073
1074 DBG("Remapping Bus %d, bridge: %s\n", bus->number, pci_name(bridge));
1075 res.start -= ((unsigned long) hose->io_base_virt - isa_io_base);
1076 res.end -= ((unsigned long) hose->io_base_virt - isa_io_base);
1077 DBG(" IO window: %08lx-%08lx\n", res.start, res.end);
1078
1079 /* Set up the top and bottom of the PCI I/O segment for this bus. */
1080 pci_read_config_dword(bridge, PCI_IO_BASE, &l);
1081 l &= 0xffff000f;
1082 l |= (res.start >> 8) & 0x00f0;
1083 l |= res.end & 0xf000;
1084 pci_write_config_dword(bridge, PCI_IO_BASE, l);
1085
1086 if ((l & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
1087 l = (res.start >> 16) | (res.end & 0xffff0000);
1088 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, l);
1089 }
1090
1091 pci_read_config_word(bridge, PCI_COMMAND, &w);
1092 w |= PCI_COMMAND_IO;
1093 pci_write_config_word(bridge, PCI_COMMAND, w);
1094
1095#if 0 /* Enabling this causes XFree 4.2.0 to hang during PCI probe */
1096 if (enable_vga) {
1097 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &w);
1098 w |= PCI_BRIDGE_CTL_VGA;
1099 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, w);
1100 }
1101#endif
1102}
1103
1104/* This function is pretty basic and actually quite broken for the
1105 * general case, it's enough for us right now though. It's supposed
1106 * to tell us if we need to open an IO range at all or not and what
1107 * size.
1108 */
1109static int __init
1110check_for_io_childs(struct pci_bus *bus, struct resource* res, int *found_vga)
1111{
1112 struct pci_dev *dev;
1113 int i;
1114 int rc = 0;
1115
1116#define push_end(res, size) do { unsigned long __sz = (size) ; \
1117 res->end = ((res->end + __sz) / (__sz + 1)) * (__sz + 1) + __sz; \
1118 } while (0)
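	/* Example of push_end() with size == 0xfff (illustrative): starting
	 * from res->end == 0xfff the expression above gives
	 * ((0xfff + 0xfff) / 0x1000) * 0x1000 + 0xfff == 0x1fff, i.e. the
	 * window grows by one 4kB-aligned chunk and still ends on a
	 * (size + 1) - 1 boundary.
	 */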
1119
1120 list_for_each_entry(dev, &bus->devices, bus_list) {
1121 u16 class = dev->class >> 8;
1122
1123 if (class == PCI_CLASS_DISPLAY_VGA ||
1124 class == PCI_CLASS_NOT_DEFINED_VGA)
1125 *found_vga = 1;
1126 if (class >> 8 == PCI_BASE_CLASS_BRIDGE && dev->subordinate)
1127 rc |= check_for_io_childs(dev->subordinate, res, found_vga);
1128 if (class == PCI_CLASS_BRIDGE_CARDBUS)
1129 push_end(res, 0xfff);
1130
1131 for (i=0; i<PCI_NUM_RESOURCES; i++) {
1132 struct resource *r;
1133 unsigned long r_size;
1134
1135 if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI
1136 && i >= PCI_BRIDGE_RESOURCES)
1137 continue;
1138 r = &dev->resource[i];
1139 r_size = r->end - r->start;
1140 if (r_size < 0xfff)
1141 r_size = 0xfff;
1142 if (r->flags & IORESOURCE_IO && (r_size) != 0) {
1143 rc = 1;
1144 push_end(res, r_size);
1145 }
1146 }
1147 }
1148
1149 return rc;
1150}
1151
1152/* Here we scan all P2P bridges of a given level that have a closed
1153 * IO window. Note that the test for the presence of a VGA card should
1154 * be improved to take into account already configured P2P bridges;
1155 * currently we don't see them, so we might end up configuring two
1156 * bridges with VGA pass-through enabled.
1157 */
1158static void __init
1159do_fixup_p2p_level(struct pci_bus *bus)
1160{
1161 struct pci_bus *b;
1162 int i, parent_io;
1163 int has_vga = 0;
1164
1165 for (parent_io=0; parent_io<4; parent_io++)
1166 if (bus->resource[parent_io]
1167 && bus->resource[parent_io]->flags & IORESOURCE_IO)
1168 break;
1169 if (parent_io >= 4)
1170 return;
1171
1172 list_for_each_entry(b, &bus->children, node) {
1173 struct pci_dev *d = b->self;
1174 struct pci_controller* hose = (struct pci_controller *)d->sysdata;
1175 struct resource *res = b->resource[0];
1176 struct resource tmp_res;
1177 unsigned long max;
1178 int found_vga = 0;
1179
1180 memset(&tmp_res, 0, sizeof(tmp_res));
1181 tmp_res.start = bus->resource[parent_io]->start;
1182
1183		/* We don't let low addresses go through that closed P2P bridge;
1184		 * that may not be strictly necessary, but it is safer that way.
1185		 */
1186 if (tmp_res.start == 0)
1187 tmp_res.start = 0x1000;
1188
1189 if (!list_empty(&b->devices) && res && res->flags == 0 &&
1190 res != bus->resource[parent_io] &&
1191 (d->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
1192 check_for_io_childs(b, &tmp_res, &found_vga)) {
1193 u8 io_base_lo;
1194
1195 printk(KERN_INFO "Fixing up IO bus %s\n", b->name);
1196
1197 if (found_vga) {
1198 if (has_vga) {
1199 printk(KERN_WARNING "Skipping VGA, already active"
1200 " on bus segment\n");
1201 found_vga = 0;
1202 } else
1203 has_vga = 1;
1204 }
1205 pci_read_config_byte(d, PCI_IO_BASE, &io_base_lo);
1206
1207 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32)
1208 max = ((unsigned long) hose->io_base_virt
1209 - isa_io_base) + 0xffffffff;
1210 else
1211 max = ((unsigned long) hose->io_base_virt
1212 - isa_io_base) + 0xffff;
1213
1214 *res = tmp_res;
1215 res->flags = IORESOURCE_IO;
1216 res->name = b->name;
1217
1218 /* Find a resource in the parent where we can allocate */
1219 for (i = 0 ; i < 4; i++) {
1220 struct resource *r = bus->resource[i];
1221 if (!r)
1222 continue;
1223 if ((r->flags & IORESOURCE_IO) == 0)
1224 continue;
1225				DBG("Trying to allocate from %08lx to %08lx from parent"
1226 " res %d: %08lx -> %08lx\n",
1227 res->start, res->end, i, r->start, r->end);
1228
1229 if (allocate_resource(r, res, res->end + 1, res->start, max,
1230 res->end + 1, NULL, NULL) < 0) {
1231 DBG("Failed !\n");
1232 continue;
1233 }
1234 do_update_p2p_io_resource(b, found_vga);
1235 break;
1236 }
1237 }
1238 do_fixup_p2p_level(b);
1239 }
1240}
1241
1242static void
1243pcibios_fixup_p2p_bridges(void)
1244{
1245 struct pci_bus *b;
1246
1247 list_for_each_entry(b, &pci_root_buses, node)
1248 do_fixup_p2p_level(b);
1249}
1250
1251#endif /* CONFIG_PPC_PMAC */
1252
1253static int __init
1254pcibios_init(void)
1255{
1256 struct pci_controller *hose;
1257 struct pci_bus *bus;
1258 int next_busno;
1259
1260 printk(KERN_INFO "PCI: Probing PCI hardware\n");
1261
1262 /* Scan all of the recorded PCI controllers. */
1263 for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
1264 if (pci_assign_all_buses)
1265 hose->first_busno = next_busno;
1266 hose->last_busno = 0xff;
1267 bus = pci_scan_bus(hose->first_busno, hose->ops, hose);
1268 hose->last_busno = bus->subordinate;
1269 if (pci_assign_all_buses || next_busno <= hose->last_busno)
1270 next_busno = hose->last_busno + pcibios_assign_bus_offset;
1271 }
1272 pci_bus_count = next_busno;
1273
1274 /* OpenFirmware based machines need a map of OF bus
1275 * numbers vs. kernel bus numbers since we may have to
1276 * remap them.
1277 */
1278 if (pci_assign_all_buses && have_of)
1279 pcibios_make_OF_bus_map();
1280
1281 /* Do machine dependent PCI interrupt routing */
1282 if (ppc_md.pci_swizzle && ppc_md.pci_map_irq)
1283 pci_fixup_irqs(ppc_md.pci_swizzle, ppc_md.pci_map_irq);
1284
1285 /* Call machine dependent fixup */
1286 if (ppc_md.pcibios_fixup)
1287 ppc_md.pcibios_fixup();
1288
1289 /* Allocate and assign resources */
1290 pcibios_allocate_bus_resources(&pci_root_buses);
1291 pcibios_allocate_resources(0);
1292 pcibios_allocate_resources(1);
1293#ifdef CONFIG_PPC_PMAC
1294 pcibios_fixup_p2p_bridges();
1295#endif /* CONFIG_PPC_PMAC */
1296 pcibios_assign_resources();
1297
1298 /* Call machine dependent post-init code */
1299 if (ppc_md.pcibios_after_init)
1300 ppc_md.pcibios_after_init();
1301
1302 return 0;
1303}
1304
1305subsys_initcall(pcibios_init);
1306
1307unsigned char __init
1308common_swizzle(struct pci_dev *dev, unsigned char *pinp)
1309{
1310 struct pci_controller *hose = dev->sysdata;
1311
1312 if (dev->bus->number != hose->first_busno) {
1313 u8 pin = *pinp;
1314 do {
1315 pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
1316 /* Move up the chain of bridges. */
1317 dev = dev->bus->self;
1318 } while (dev->bus->self);
1319 *pinp = pin;
1320
1321 /* The slot is the idsel of the last bridge. */
1322 }
1323 return PCI_SLOT(dev->devfn);
1324}
1325
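bridge_swizzle() is defined earlier in this file and is assumed here to implement the conventional INTx rotation, where each bridge level rotates the interrupt pin by the device's slot number. A stand-alone sketch of that rotation, with made-up slot numbers:

#include <stdio.h>

/* Conventional PCI INTx swizzle: walking up through a P2P bridge,
 * INTx of a device in slot N appears rotated by N on the bridge's
 * primary side.  Pins are numbered 1 (INTA) to 4 (INTD).
 */
static unsigned char swizzle(unsigned char pin, unsigned char slot)
{
	return ((pin - 1 + slot) % 4) + 1;
}

int main(void)
{
	/* hypothetical: INTA (1) from a device in slot 2 behind two
	 * bridges sitting in slots 3 and 0 of their parent buses */
	unsigned char pin = 1;

	pin = swizzle(pin, 2);	/* device level  -> INTC (3) */
	pin = swizzle(pin, 3);	/* first bridge  -> INTB (2) */
	pin = swizzle(pin, 0);	/* second bridge -> INTB (2) */
	printf("pin seen at the host bridge: INT%c\n", 'A' + pin - 1);
	return 0;
}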
1326unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
1327 unsigned long start, unsigned long size)
1328{
1329 return start;
1330}
1331
1332void __init pcibios_fixup_bus(struct pci_bus *bus)
1333{
1334 struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
1335 unsigned long io_offset;
1336 struct resource *res;
1337 int i;
1338
1339 io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
1340 if (bus->parent == NULL) {
1341 /* This is a host bridge - fill in its resources */
1342 hose->bus = bus;
1343
1344 bus->resource[0] = res = &hose->io_resource;
1345 if (!res->flags) {
1346 if (io_offset)
1347 printk(KERN_ERR "I/O resource not set for host"
1348 " bridge %d\n", hose->index);
1349 res->start = 0;
1350 res->end = IO_SPACE_LIMIT;
1351 res->flags = IORESOURCE_IO;
1352 }
1353 res->start += io_offset;
1354 res->end += io_offset;
1355
1356 for (i = 0; i < 3; ++i) {
1357 res = &hose->mem_resources[i];
1358 if (!res->flags) {
1359 if (i > 0)
1360 continue;
1361 printk(KERN_ERR "Memory resource not set for "
1362 "host bridge %d\n", hose->index);
1363 res->start = hose->pci_mem_offset;
1364 res->end = ~0U;
1365 res->flags = IORESOURCE_MEM;
1366 }
1367 bus->resource[i+1] = res;
1368 }
1369 } else {
1370 /* This is a subordinate bridge */
1371 pci_read_bridge_bases(bus);
1372
1373 for (i = 0; i < 4; ++i) {
1374 if ((res = bus->resource[i]) == NULL)
1375 continue;
1376 if (!res->flags)
1377 continue;
1378 if (io_offset && (res->flags & IORESOURCE_IO)) {
1379 res->start += io_offset;
1380 res->end += io_offset;
1381 } else if (hose->pci_mem_offset
1382 && (res->flags & IORESOURCE_MEM)) {
1383 res->start += hose->pci_mem_offset;
1384 res->end += hose->pci_mem_offset;
1385 }
1386 }
1387 }
1388
1389 if (ppc_md.pcibios_fixup_bus)
1390 ppc_md.pcibios_fixup_bus(bus);
1391}
1392
1393char __init *pcibios_setup(char *str)
1394{
1395 return str;
1396}
1397
1398/* the next one is stolen from the alpha port... */
1399void __init
1400pcibios_update_irq(struct pci_dev *dev, int irq)
1401{
1402 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
1403 /* XXX FIXME - update OF device tree node interrupt property */
1404}
1405
1406int pcibios_enable_device(struct pci_dev *dev, int mask)
1407{
1408 u16 cmd, old_cmd;
1409 int idx;
1410 struct resource *r;
1411
1412 if (ppc_md.pcibios_enable_device_hook)
1413 if (ppc_md.pcibios_enable_device_hook(dev, 0))
1414 return -EINVAL;
1415
1416 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1417 old_cmd = cmd;
1418 for (idx=0; idx<6; idx++) {
1419 r = &dev->resource[idx];
1420 if (r->flags & IORESOURCE_UNSET) {
1421 printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
1422 return -EINVAL;
1423 }
1424 if (r->flags & IORESOURCE_IO)
1425 cmd |= PCI_COMMAND_IO;
1426 if (r->flags & IORESOURCE_MEM)
1427 cmd |= PCI_COMMAND_MEMORY;
1428 }
1429 if (cmd != old_cmd) {
1430 printk("PCI: Enabling device %s (%04x -> %04x)\n",
1431 pci_name(dev), old_cmd, cmd);
1432 pci_write_config_word(dev, PCI_COMMAND, cmd);
1433 }
1434 return 0;
1435}
1436
1437struct pci_controller*
1438pci_bus_to_hose(int bus)
1439{
1440 struct pci_controller* hose = hose_head;
1441
1442 for (; hose; hose = hose->next)
1443 if (bus >= hose->first_busno && bus <= hose->last_busno)
1444 return hose;
1445 return NULL;
1446}
1447
1448void __iomem *
1449pci_bus_io_base(unsigned int bus)
1450{
1451 struct pci_controller *hose;
1452
1453 hose = pci_bus_to_hose(bus);
1454 if (!hose)
1455 return NULL;
1456 return hose->io_base_virt;
1457}
1458
1459unsigned long
1460pci_bus_io_base_phys(unsigned int bus)
1461{
1462 struct pci_controller *hose;
1463
1464 hose = pci_bus_to_hose(bus);
1465 if (!hose)
1466 return 0;
1467 return hose->io_base_phys;
1468}
1469
1470unsigned long
1471pci_bus_mem_base_phys(unsigned int bus)
1472{
1473 struct pci_controller *hose;
1474
1475 hose = pci_bus_to_hose(bus);
1476 if (!hose)
1477 return 0;
1478 return hose->pci_mem_offset;
1479}
1480
1481unsigned long
1482pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
1483{
1484 /* Hack alert again ! See comments in chrp_pci.c
1485 */
1486 struct pci_controller* hose =
1487 (struct pci_controller *)pdev->sysdata;
1488 if (hose && res->flags & IORESOURCE_MEM)
1489 return res->start - hose->pci_mem_offset;
1490 /* We may want to do something with IOs here... */
1491 return res->start;
1492}
1493
1494
1495static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
1496 unsigned long *offset,
1497 enum pci_mmap_state mmap_state)
1498{
1499 struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
1500 unsigned long io_offset = 0;
1501 int i, res_bit;
1502
1503 if (hose == 0)
1504 return NULL; /* should never happen */
1505
1506 /* If memory, add on the PCI bridge address offset */
1507 if (mmap_state == pci_mmap_mem) {
1508 *offset += hose->pci_mem_offset;
1509 res_bit = IORESOURCE_MEM;
1510 } else {
1511 io_offset = hose->io_base_virt - ___IO_BASE;
1512 *offset += io_offset;
1513 res_bit = IORESOURCE_IO;
1514 }
1515
1516 /*
1517 * Check that the offset requested corresponds to one of the
1518 * resources of the device.
1519 */
1520 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
1521 struct resource *rp = &dev->resource[i];
1522 int flags = rp->flags;
1523
1524 /* treat ROM as memory (should be already) */
1525 if (i == PCI_ROM_RESOURCE)
1526 flags |= IORESOURCE_MEM;
1527
1528 /* Active and same type? */
1529 if ((flags & res_bit) == 0)
1530 continue;
1531
1532 /* In the range of this resource? */
1533 if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
1534 continue;
1535
1536 /* found it! construct the final physical address */
1537 if (mmap_state == pci_mmap_io)
1538 *offset += hose->io_base_phys - io_offset;
1539 return rp;
1540 }
1541
1542 return NULL;
1543}
1544
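The loop above accepts an mmap offset only if it falls inside one of the device's resources, comparing against the page-aligned resource start. A stand-alone sketch of that containment test, with a made-up small BAR:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Mirror of the range test in __pci_mmap_make_offset(): the offset is
 * accepted anywhere from the start of the page containing the BAR up
 * to and including the BAR's last byte.
 */
static int offset_in_bar(unsigned long offset,
			 unsigned long bar_start, unsigned long bar_end)
{
	return offset >= (bar_start & PAGE_MASK) && offset <= bar_end;
}

int main(void)
{
	/* hypothetical 256-byte BAR that does not start on a page boundary */
	unsigned long start = 0x80000c00, end = 0x80000cff;

	printf("%d %d %d\n",
	       offset_in_bar(0x80000000, start, end),	/* 1: same page    */
	       offset_in_bar(0x80000c80, start, end),	/* 1: inside BAR   */
	       offset_in_bar(0x80001000, start, end));	/* 0: past the end */
	return 0;
}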
1545/*
1546 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
1547 * device mapping.
1548 */
1549static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
1550 pgprot_t protection,
1551 enum pci_mmap_state mmap_state,
1552 int write_combine)
1553{
1554 unsigned long prot = pgprot_val(protection);
1555
1556	/* Write combine is always 0 on non-memory space mappings. On
1557	 * memory space, if the user didn't pass 1, we check for a
1558	 * "prefetchable" resource. This is a bit hackish, but we use
1559	 * it to work around the inability of /sysfs to provide a
1560	 * write-combine bit.
1561	 */
1562 if (mmap_state != pci_mmap_mem)
1563 write_combine = 0;
1564 else if (write_combine == 0) {
1565 if (rp->flags & IORESOURCE_PREFETCH)
1566 write_combine = 1;
1567 }
1568
1569 /* XXX would be nice to have a way to ask for write-through */
1570 prot |= _PAGE_NO_CACHE;
1571 if (write_combine)
1572 prot &= ~_PAGE_GUARDED;
1573 else
1574 prot |= _PAGE_GUARDED;
1575
1576 printk("PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
1577 prot);
1578
1579 return __pgprot(prot);
1580}
1581
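_PAGE_NO_CACHE and _PAGE_GUARDED are PowerPC PTE bits; the sketch below mirrors the write-combine decision above with placeholder bit values, purely for illustration:

#include <stdio.h>

/* Placeholder values -- the real _PAGE_NO_CACHE/_PAGE_GUARDED bits
 * come from asm/pgtable.h and differ between MMU families.
 */
#define FAKE_PAGE_NO_CACHE	0x020UL
#define FAKE_PAGE_GUARDED	0x008UL

/* Same decision as __pci_mmap_set_pgprot(): I/O space is never write
 * combined; memory space is write combined if the caller asked for it
 * or the resource is prefetchable, which drops the guarded bit.
 */
static unsigned long pci_map_prot(unsigned long prot, int is_mem,
				  int prefetchable, int write_combine)
{
	if (!is_mem)
		write_combine = 0;
	else if (!write_combine && prefetchable)
		write_combine = 1;

	prot |= FAKE_PAGE_NO_CACHE;
	if (write_combine)
		prot &= ~FAKE_PAGE_GUARDED;
	else
		prot |= FAKE_PAGE_GUARDED;
	return prot;
}

int main(void)
{
	printf("io:        %#lx\n", pci_map_prot(0, 0, 0, 1));
	printf("mem:       %#lx\n", pci_map_prot(0, 1, 0, 0));
	printf("prefetch:  %#lx\n", pci_map_prot(0, 1, 1, 0));
	return 0;
}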
1582/*
1583 * This one is used by /dev/mem and fbdev, which have no clue about
1584 * the PCI device; it tries to find the matching PCI device first and
1585 * then calls the routine above.
1586 */
1587pgprot_t pci_phys_mem_access_prot(struct file *file,
1588 unsigned long pfn,
1589 unsigned long size,
1590 pgprot_t protection)
1591{
1592 struct pci_dev *pdev = NULL;
1593 struct resource *found = NULL;
1594 unsigned long prot = pgprot_val(protection);
1595 unsigned long offset = pfn << PAGE_SHIFT;
1596 int i;
1597
1598 if (page_is_ram(pfn))
1599 return prot;
1600
1601 prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
1602
1603 for_each_pci_dev(pdev) {
1604 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
1605 struct resource *rp = &pdev->resource[i];
1606 int flags = rp->flags;
1607
1608 /* Active and same type? */
1609 if ((flags & IORESOURCE_MEM) == 0)
1610 continue;
1611 /* In the range of this resource? */
1612 if (offset < (rp->start & PAGE_MASK) ||
1613 offset > rp->end)
1614 continue;
1615 found = rp;
1616 break;
1617 }
1618 if (found)
1619 break;
1620 }
1621 if (found) {
1622 if (found->flags & IORESOURCE_PREFETCH)
1623 prot &= ~_PAGE_GUARDED;
1624 pci_dev_put(pdev);
1625 }
1626
1627 DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);
1628
1629 return __pgprot(prot);
1630}
1631
1632
1633/*
1634 * Perform the actual remap of the pages for a PCI device mapping, as
1635 * appropriate for this architecture. The region in the process to map
1636 * is described by vm_start and vm_end members of VMA, the base physical
1637 * address is found in vm_pgoff.
1638 * The pci device structure is provided so that architectures may make mapping
1639 * decisions on a per-device or per-bus basis.
1640 *
1641 * Returns a negative error code on failure, zero on success.
1642 */
1643int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
1644 enum pci_mmap_state mmap_state,
1645 int write_combine)
1646{
1647 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
1648 struct resource *rp;
1649 int ret;
1650
1651 rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
1652 if (rp == NULL)
1653 return -EINVAL;
1654
1655 vma->vm_pgoff = offset >> PAGE_SHIFT;
1656 vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
1657 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
1658 vma->vm_page_prot,
1659 mmap_state, write_combine);
1660
1661 ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
1662 vma->vm_end - vma->vm_start, vma->vm_page_prot);
1663
1664 return ret;
1665}
1666
1667/* Obsolete functions. Should be removed once the symbios driver
1668 * is fixed
1669 */
1670unsigned long
1671phys_to_bus(unsigned long pa)
1672{
1673 struct pci_controller *hose;
1674 int i;
1675
1676 for (hose = hose_head; hose; hose = hose->next) {
1677 for (i = 0; i < 3; ++i) {
1678 if (pa >= hose->mem_resources[i].start
1679 && pa <= hose->mem_resources[i].end) {
1680 /*
1681 * XXX the hose->pci_mem_offset really
1682 * only applies to mem_resources[0].
1683 * We need a way to store an offset for
1684 * the others. -- paulus
1685 */
1686 if (i == 0)
1687 pa -= hose->pci_mem_offset;
1688 return pa;
1689 }
1690 }
1691 }
1692 /* hmmm, didn't find it */
1693 return 0;
1694}
1695
1696unsigned long
1697pci_phys_to_bus(unsigned long pa, int busnr)
1698{
1699 struct pci_controller* hose = pci_bus_to_hose(busnr);
1700 if (!hose)
1701 return pa;
1702 return pa - hose->pci_mem_offset;
1703}
1704
1705unsigned long
1706pci_bus_to_phys(unsigned int ba, int busnr)
1707{
1708 struct pci_controller* hose = pci_bus_to_hose(busnr);
1709 if (!hose)
1710 return ba;
1711 return ba + hose->pci_mem_offset;
1712}
1713
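pci_phys_to_bus() and pci_bus_to_phys() are plain offset arithmetic per host bridge. A tiny sketch with a made-up pci_mem_offset:

#include <stdio.h>

/* Mirror of pci_phys_to_bus()/pci_bus_to_phys() for a single host
 * bridge with a hypothetical pci_mem_offset.
 */
#define MEM_OFFSET 0x80000000UL

static unsigned long phys_to_bus_addr(unsigned long pa) { return pa - MEM_OFFSET; }
static unsigned long bus_to_phys_addr(unsigned long ba) { return ba + MEM_OFFSET; }

int main(void)
{
	unsigned long pa = 0x9d000000;	/* CPU-visible BAR address */

	printf("phys %#lx -> bus %#lx -> phys %#lx\n",
	       pa, phys_to_bus_addr(pa),
	       bus_to_phys_addr(phys_to_bus_addr(pa)));
	return 0;
}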
1714/* Provide information on locations of various I/O regions in physical
1715 * memory. Do this on a per-card basis so that we choose the right
1716 * root bridge.
1717 * Note that the returned IO or memory base is a physical address
1718 */
1719
1720long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
1721{
1722 struct pci_controller* hose;
1723 long result = -EOPNOTSUPP;
1724
1725 /* Argh ! Please forgive me for that hack, but that's the
1726	 * simplest way to get existing XFree to not lock up on some
1727 * G5 machines... So when something asks for bus 0 io base
1728 * (bus 0 is HT root), we return the AGP one instead.
1729 */
1730#ifdef CONFIG_PPC_PMAC
1731 if (_machine == _MACH_Pmac && machine_is_compatible("MacRISC4"))
1732 if (bus == 0)
1733 bus = 0xf0;
1734#endif /* CONFIG_PPC_PMAC */
1735
1736 hose = pci_bus_to_hose(bus);
1737 if (!hose)
1738 return -ENODEV;
1739
1740 switch (which) {
1741 case IOBASE_BRIDGE_NUMBER:
1742 return (long)hose->first_busno;
1743 case IOBASE_MEMORY:
1744 return (long)hose->pci_mem_offset;
1745 case IOBASE_IO:
1746 return (long)hose->io_base_phys;
1747 case IOBASE_ISA_IO:
1748 return (long)isa_io_base;
1749 case IOBASE_ISA_MEM:
1750 return (long)isa_mem_base;
1751 }
1752
1753 return result;
1754}
1755
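A hedged user-space sketch of calling this syscall; it assumes __NR_pciconfig_iobase is available from the powerpc syscall headers, and the IOBASE_IO value is copied here as an assumption rather than taken from a kernel header:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define IOBASE_IO 2	/* assumption: matches the kernel's definition */

int main(void)
{
	/* ask for the physical I/O base of the hose behind bus 0 */
	long io_phys = syscall(__NR_pciconfig_iobase, IOBASE_IO,
			       0 /* bus */, 0 /* devfn */);

	if (io_phys == -1)
		perror("pciconfig_iobase");
	else
		printf("bus 0 I/O space starts at phys %#lx\n",
		       (unsigned long)io_phys);
	return 0;
}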
1756void pci_resource_to_user(const struct pci_dev *dev, int bar,
1757 const struct resource *rsrc,
1758 u64 *start, u64 *end)
1759{
1760 struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
1761 unsigned long offset = 0;
1762
1763 if (hose == NULL)
1764 return;
1765
1766 if (rsrc->flags & IORESOURCE_IO)
1767 offset = ___IO_BASE - hose->io_base_virt + hose->io_base_phys;
1768
1769 *start = rsrc->start + offset;
1770 *end = rsrc->end + offset;
1771}
1772
1773void __init
1774pci_init_resource(struct resource *res, unsigned long start, unsigned long end,
1775 int flags, char *name)
1776{
1777 res->start = start;
1778 res->end = end;
1779 res->flags = flags;
1780 res->name = name;
1781 res->parent = NULL;
1782 res->sibling = NULL;
1783 res->child = NULL;
1784}
1785
1786void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
1787{
1788 unsigned long start = pci_resource_start(dev, bar);
1789 unsigned long len = pci_resource_len(dev, bar);
1790 unsigned long flags = pci_resource_flags(dev, bar);
1791
1792 if (!len)
1793 return NULL;
1794 if (max && len > max)
1795 len = max;
1796 if (flags & IORESOURCE_IO)
1797 return ioport_map(start, len);
1798 if (flags & IORESOURCE_MEM)
1799 /* Not checking IORESOURCE_CACHEABLE because PPC does
1800 * not currently distinguish between ioremap and
1801 * ioremap_nocache.
1802 */
1803 return ioremap(start, len);
1804 /* What? */
1805 return NULL;
1806}
1807
1808void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
1809{
1810 /* Nothing to do */
1811}
1812EXPORT_SYMBOL(pci_iomap);
1813EXPORT_SYMBOL(pci_iounmap);
1814
1815unsigned long pci_address_to_pio(phys_addr_t address)
1816{
1817 struct pci_controller* hose = hose_head;
1818
1819 for (; hose; hose = hose->next) {
1820 unsigned int size = hose->io_resource.end -
1821 hose->io_resource.start + 1;
1822 if (address >= hose->io_base_phys &&
1823 address < (hose->io_base_phys + size)) {
1824 unsigned long base =
1825 (unsigned long)hose->io_base_virt - _IO_BASE;
1826 return base + (address - hose->io_base_phys);
1827 }
1828 }
1829 return (unsigned int)-1;
1830}
1831EXPORT_SYMBOL(pci_address_to_pio);
1832
1833/*
1834 * Null PCI config access functions, for the case when we can't
1835 * find a hose.
1836 */
1837#define NULL_PCI_OP(rw, size, type) \
1838static int \
1839null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
1840{ \
1841 return PCIBIOS_DEVICE_NOT_FOUND; \
1842}
1843
1844static int
1845null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
1846 int len, u32 *val)
1847{
1848 return PCIBIOS_DEVICE_NOT_FOUND;
1849}
1850
1851static int
1852null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
1853 int len, u32 val)
1854{
1855 return PCIBIOS_DEVICE_NOT_FOUND;
1856}
1857
1858static struct pci_ops null_pci_ops =
1859{
1860 null_read_config,
1861 null_write_config
1862};
1863
1864/*
1865 * These functions are used early on before PCI scanning is done
1866 * and all of the pci_dev and pci_bus structures have been created.
1867 */
1868static struct pci_bus *
1869fake_pci_bus(struct pci_controller *hose, int busnr)
1870{
1871 static struct pci_bus bus;
1872
1873 if (hose == 0) {
1874 hose = pci_bus_to_hose(busnr);
1875 if (hose == 0)
1876 printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1877 }
1878 bus.number = busnr;
1879 bus.sysdata = hose;
1880 bus.ops = hose? hose->ops: &null_pci_ops;
1881 return &bus;
1882}
1883
1884#define EARLY_PCI_OP(rw, size, type) \
1885int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
1886 int devfn, int offset, type value) \
1887{ \
1888 return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \
1889 devfn, offset, value); \
1890}
1891
1892EARLY_PCI_OP(read, byte, u8 *)
1893EARLY_PCI_OP(read, word, u16 *)
1894EARLY_PCI_OP(read, dword, u32 *)
1895EARLY_PCI_OP(write, byte, u8)
1896EARLY_PCI_OP(write, word, u16)
1897EARLY_PCI_OP(write, dword, u32)
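For readers unused to this macro style, the first invocation above expands, after preprocessing, to roughly the following:

/* Expansion of EARLY_PCI_OP(read, byte, u8 *): */
int early_read_config_byte(struct pci_controller *hose, int bus,
			   int devfn, int offset, u8 *value)
{
	return pci_bus_read_config_byte(fake_pci_bus(hose, bus),
					devfn, offset, value);
}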
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 16d9a904f3cb..d9a459c144d8 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -230,8 +230,7 @@ EXPORT_SYMBOL(__down_interruptible);
230EXPORT_SYMBOL(cpm_install_handler); 230EXPORT_SYMBOL(cpm_install_handler);
231EXPORT_SYMBOL(cpm_free_handler); 231EXPORT_SYMBOL(cpm_free_handler);
232#endif /* CONFIG_8xx */ 232#endif /* CONFIG_8xx */
233#if defined(CONFIG_8xx) || defined(CONFIG_40x) || defined(CONFIG_85xx) ||\ 233#if defined(CONFIG_8xx) || defined(CONFIG_40x)
234 defined(CONFIG_83xx)
235EXPORT_SYMBOL(__res); 234EXPORT_SYMBOL(__res);
236#endif 235#endif
237 236
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 02e2115323e4..d50c8df0183e 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -1627,6 +1627,11 @@ static void of_node_release(struct kref *kref)
1627 kfree(prop->value); 1627 kfree(prop->value);
1628 kfree(prop); 1628 kfree(prop);
1629 prop = next; 1629 prop = next;
1630
1631 if (!prop) {
1632 prop = node->deadprops;
1633 node->deadprops = NULL;
1634 }
1630 } 1635 }
1631 kfree(node->intrs); 1636 kfree(node->intrs);
1632 kfree(node->full_name); 1637 kfree(node->full_name);
@@ -1774,22 +1779,32 @@ static int __init prom_reconfig_setup(void)
1774__initcall(prom_reconfig_setup); 1779__initcall(prom_reconfig_setup);
1775#endif 1780#endif
1776 1781
1777/* 1782struct property *of_find_property(struct device_node *np, const char *name,
1778 * Find a property with a given name for a given node 1783 int *lenp)
1779 * and return the value.
1780 */
1781unsigned char *get_property(struct device_node *np, const char *name,
1782 int *lenp)
1783{ 1784{
1784 struct property *pp; 1785 struct property *pp;
1785 1786
1787 read_lock(&devtree_lock);
1786 for (pp = np->properties; pp != 0; pp = pp->next) 1788 for (pp = np->properties; pp != 0; pp = pp->next)
1787 if (strcmp(pp->name, name) == 0) { 1789 if (strcmp(pp->name, name) == 0) {
1788 if (lenp != 0) 1790 if (lenp != 0)
1789 *lenp = pp->length; 1791 *lenp = pp->length;
1790 return pp->value; 1792 break;
1791 } 1793 }
1792 return NULL; 1794 read_unlock(&devtree_lock);
1795
1796 return pp;
1797}
1798
1799/*
1800 * Find a property with a given name for a given node
1801 * and return the value.
1802 */
1803unsigned char *get_property(struct device_node *np, const char *name,
1804 int *lenp)
1805{
1806 struct property *pp = of_find_property(np,name,lenp);
1807 return pp ? pp->value : NULL;
1793} 1808}
1794EXPORT_SYMBOL(get_property); 1809EXPORT_SYMBOL(get_property);
1795 1810
@@ -1823,4 +1838,82 @@ int prom_add_property(struct device_node* np, struct property* prop)
1823 return 0; 1838 return 0;
1824} 1839}
1825 1840
1841/*
1842 * Remove a property from a node. Note that we don't actually
1843 * remove it, since we have given out who-knows-how-many pointers
1844 * to the data using get-property. Instead we just move the property
1845 * to the "dead properties" list, so it won't be found any more.
1846 */
1847int prom_remove_property(struct device_node *np, struct property *prop)
1848{
1849 struct property **next;
1850 int found = 0;
1826 1851
1852 write_lock(&devtree_lock);
1853 next = &np->properties;
1854 while (*next) {
1855 if (*next == prop) {
1856 /* found the node */
1857 *next = prop->next;
1858 prop->next = np->deadprops;
1859 np->deadprops = prop;
1860 found = 1;
1861 break;
1862 }
1863 next = &(*next)->next;
1864 }
1865 write_unlock(&devtree_lock);
1866
1867 if (!found)
1868 return -ENODEV;
1869
1870#ifdef CONFIG_PROC_DEVICETREE
1871 /* try to remove the proc node as well */
1872 if (np->pde)
1873 proc_device_tree_remove_prop(np->pde, prop);
1874#endif /* CONFIG_PROC_DEVICETREE */
1875
1876 return 0;
1877}
1878
1879/*
1880 * Update a property in a node. Note that we don't actually
1881 * remove it, since we have given out who-knows-how-many pointers
1882 * to the data using get-property. Instead we just move the property
1883 * to the "dead properties" list, and add the new property to the
1884 * property list
1885 */
1886int prom_update_property(struct device_node *np,
1887 struct property *newprop,
1888 struct property *oldprop)
1889{
1890 struct property **next;
1891 int found = 0;
1892
1893 write_lock(&devtree_lock);
1894 next = &np->properties;
1895 while (*next) {
1896 if (*next == oldprop) {
1897 /* found the node */
1898 newprop->next = oldprop->next;
1899 *next = newprop;
1900 oldprop->next = np->deadprops;
1901 np->deadprops = oldprop;
1902 found = 1;
1903 break;
1904 }
1905 next = &(*next)->next;
1906 }
1907 write_unlock(&devtree_lock);
1908
1909 if (!found)
1910 return -ENODEV;
1911
1912#ifdef CONFIG_PROC_DEVICETREE
1913 /* try to add to proc as well if it was initialized */
1914 if (np->pde)
1915 proc_device_tree_update_prop(np->pde, newprop, oldprop);
1916#endif /* CONFIG_PROC_DEVICETREE */
1917
1918 return 0;
1919}
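prom_remove_property() and prom_update_property() above use the classic pointer-to-pointer walk to unlink an entry, and park it on deadprops rather than freeing it because callers may still hold pointers handed out by get_property(). A stand-alone model of that move, for illustration only:

#include <stdio.h>

struct prop {
	const char *name;
	struct prop *next;
};

/* Move "victim" from the live list to the dead list without freeing it,
 * using the same pointer-to-pointer walk as prom_remove_property().
 */
static int move_to_dead(struct prop **live, struct prop **dead,
			struct prop *victim)
{
	struct prop **next;

	for (next = live; *next; next = &(*next)->next) {
		if (*next == victim) {
			*next = victim->next;	/* unlink from live list */
			victim->next = *dead;	/* push onto dead list   */
			*dead = victim;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	struct prop c = { "compatible", NULL };
	struct prop b = { "reg", &c };
	struct prop a = { "name", &b };
	struct prop *live = &a, *dead = NULL, *p;

	move_to_dead(&live, &dead, &b);
	for (p = live; p; p = p->next)
		printf("live: %s\n", p->name);	/* name, compatible */
	for (p = dead; p; p = p->next)
		printf("dead: %s\n", p->name);	/* reg */
	return 0;
}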
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index d963a12ec640..7881ec96ef11 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -605,7 +605,8 @@ static void __init early_cmdline_parse(void)
605 opt = strstr(RELOC(prom_cmd_line), RELOC("crashkernel=")); 605 opt = strstr(RELOC(prom_cmd_line), RELOC("crashkernel="));
606 if (opt) { 606 if (opt) {
607 opt += 12; 607 opt += 12;
608 RELOC(prom_crashk_size) = prom_memparse(opt, &opt); 608 RELOC(prom_crashk_size) =
609 prom_memparse(opt, (const char **)&opt);
609 610
610 if (ALIGN(RELOC(prom_crashk_size), 0x1000000) != 611 if (ALIGN(RELOC(prom_crashk_size), 0x1000000) !=
611 RELOC(prom_crashk_size)) { 612 RELOC(prom_crashk_size)) {
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 309ae1d5fa77..a8099c806150 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -113,7 +113,8 @@ static unsigned int of_bus_default_get_flags(u32 *addr)
113 113
114static int of_bus_pci_match(struct device_node *np) 114static int of_bus_pci_match(struct device_node *np)
115{ 115{
116 return !strcmp(np->type, "pci"); 116 /* "vci" is for the /chaos bridge on 1st-gen PCI powermacs */
117 return !strcmp(np->type, "pci") || !strcmp(np->type, "vci");
117} 118}
118 119
119static void of_bus_pci_count_cells(struct device_node *np, 120static void of_bus_pci_count_cells(struct device_node *np,
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 4b9cfe4637b1..7fe4a5c944c9 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -36,6 +36,11 @@ struct rtas_t rtas = {
36 .lock = SPIN_LOCK_UNLOCKED 36 .lock = SPIN_LOCK_UNLOCKED
37}; 37};
38 38
39struct rtas_suspend_me_data {
40 long waiting;
41 struct rtas_args *args;
42};
43
39EXPORT_SYMBOL(rtas); 44EXPORT_SYMBOL(rtas);
40 45
41DEFINE_SPINLOCK(rtas_data_buf_lock); 46DEFINE_SPINLOCK(rtas_data_buf_lock);
@@ -556,6 +561,80 @@ void rtas_os_term(char *str)
556 } while (status == RTAS_BUSY); 561 } while (status == RTAS_BUSY);
557} 562}
558 563
564static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
565#ifdef CONFIG_PPC_PSERIES
566static void rtas_percpu_suspend_me(void *info)
567{
568 long rc;
569 long flags;
570 struct rtas_suspend_me_data *data =
571 (struct rtas_suspend_me_data *)info;
572
573 /*
574 * We use "waiting" to indicate our state. As long
 575 * as it is >0, all of us are still trying to join up.
576 * If it goes to 0, we have successfully joined up and
577 * one thread got H_Continue. If any error happens,
578 * we set it to <0.
579 */
580 local_irq_save(flags);
581 do {
582 rc = plpar_hcall_norets(H_JOIN);
583 smp_rmb();
584 } while (rc == H_Success && data->waiting > 0);
585 if (rc == H_Success)
586 goto out;
587
588 if (rc == H_Continue) {
589 data->waiting = 0;
590 rtas_call(ibm_suspend_me_token, 0, 1,
591 data->args->args);
592 } else {
593 data->waiting = -EBUSY;
594 printk(KERN_ERR "Error on H_Join hypervisor call\n");
595 }
596
597out:
 598 /* Before we restore interrupts, make sure we don't
 599 * generate spurious soft lockup errors.
600 */
601 touch_softlockup_watchdog();
602 local_irq_restore(flags);
603 return;
604}
605
606static int rtas_ibm_suspend_me(struct rtas_args *args)
607{
608 int i;
609
610 struct rtas_suspend_me_data data;
611
612 data.waiting = 1;
613 data.args = args;
614
 615 /* Call the function on all CPUs. One of us will make the
 616 * actual rtas call.
617 */
618 if (on_each_cpu(rtas_percpu_suspend_me, &data, 1, 0))
619 data.waiting = -EINVAL;
620
621 if (data.waiting != 0)
622 printk(KERN_ERR "Error doing global join\n");
623
624 /* Prod each CPU. This won't hurt, and will wake
625 * anyone we successfully put to sleep with H_Join
626 */
627 for_each_cpu(i)
628 plpar_hcall_norets(H_PROD, i);
629
630 return data.waiting;
631}
632#else /* CONFIG_PPC_PSERIES */
633static int rtas_ibm_suspend_me(struct rtas_args *args)
634{
635 return -ENOSYS;
636}
637#endif
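The H_JOIN/H_PROD sequence above is easier to follow as a threading analogy: every CPU spins in the joined state until one of them, the one that would receive H_Continue, performs the suspend and clears the shared flag. A rough pthreads model of the "waiting" protocol only; it is an analogy, not the real hcall interface:

#include <pthread.h>
#include <stdio.h>

#define NCPUS 4

/* >0: still joining, 0: suspend done, <0: error -- same convention as
 * rtas_suspend_me_data.waiting in the code above.
 */
static volatile long waiting = 1;

static void *percpu_suspend(void *arg)
{
	long cpu = (long)arg;

	if (cpu == 0) {
		/* Model of the CPU that gets H_Continue: it performs the
		 * "suspend" work and then releases everybody else.
		 */
		printf("cpu0: doing the ibm,suspend-me equivalent\n");
		__sync_synchronize();
		waiting = 0;
	} else {
		/* Model of H_JOIN: spin until the flag drops to 0. */
		while (waiting > 0)
			__sync_synchronize();
		printf("cpu%ld: released from join\n", cpu);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];
	long i;

	for (i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, percpu_suspend, (void *)i);
	for (i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	printf("waiting = %ld (0 means success)\n", waiting);
	return 0;
}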
559 638
560asmlinkage int ppc_rtas(struct rtas_args __user *uargs) 639asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
561{ 640{
@@ -563,6 +642,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
563 unsigned long flags; 642 unsigned long flags;
564 char *buff_copy, *errbuf = NULL; 643 char *buff_copy, *errbuf = NULL;
565 int nargs; 644 int nargs;
645 int rc;
566 646
567 if (!capable(CAP_SYS_ADMIN)) 647 if (!capable(CAP_SYS_ADMIN))
568 return -EPERM; 648 return -EPERM;
@@ -581,6 +661,17 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
581 nargs * sizeof(rtas_arg_t)) != 0) 661 nargs * sizeof(rtas_arg_t)) != 0)
582 return -EFAULT; 662 return -EFAULT;
583 663
664 if (args.token == RTAS_UNKNOWN_SERVICE)
665 return -EINVAL;
666
667 /* Need to handle ibm,suspend_me call specially */
668 if (args.token == ibm_suspend_me_token) {
669 rc = rtas_ibm_suspend_me(&args);
670 if (rc)
671 return rc;
672 goto copy_return;
673 }
674
584 buff_copy = get_errorlog_buffer(); 675 buff_copy = get_errorlog_buffer();
585 676
586 spin_lock_irqsave(&rtas.lock, flags); 677 spin_lock_irqsave(&rtas.lock, flags);
@@ -604,6 +695,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
604 kfree(buff_copy); 695 kfree(buff_copy);
605 } 696 }
606 697
698 copy_return:
607 /* Copy out args. */ 699 /* Copy out args. */
608 if (copy_to_user(uargs->args + nargs, 700 if (copy_to_user(uargs->args + nargs,
609 args.args + nargs, 701 args.args + nargs,
@@ -675,8 +767,10 @@ void __init rtas_initialize(void)
675 * the stop-self token if any 767 * the stop-self token if any
676 */ 768 */
677#ifdef CONFIG_PPC64 769#ifdef CONFIG_PPC64
678 if (_machine == PLATFORM_PSERIES_LPAR) 770 if (_machine == PLATFORM_PSERIES_LPAR) {
679 rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX); 771 rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX);
772 ibm_suspend_me_token = rtas_token("ibm,suspend-me");
773 }
680#endif 774#endif
681 rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region); 775 rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
682 776
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index d5c52fae023a..be12041c0fc5 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -100,7 +100,8 @@ void machine_shutdown(void)
100void machine_restart(char *cmd) 100void machine_restart(char *cmd)
101{ 101{
102 machine_shutdown(); 102 machine_shutdown();
103 ppc_md.restart(cmd); 103 if (ppc_md.restart)
104 ppc_md.restart(cmd);
104#ifdef CONFIG_SMP 105#ifdef CONFIG_SMP
105 smp_send_stop(); 106 smp_send_stop();
106#endif 107#endif
@@ -112,7 +113,8 @@ void machine_restart(char *cmd)
112void machine_power_off(void) 113void machine_power_off(void)
113{ 114{
114 machine_shutdown(); 115 machine_shutdown();
115 ppc_md.power_off(); 116 if (ppc_md.power_off)
117 ppc_md.power_off();
116#ifdef CONFIG_SMP 118#ifdef CONFIG_SMP
117 smp_send_stop(); 119 smp_send_stop();
118#endif 120#endif
@@ -129,7 +131,8 @@ EXPORT_SYMBOL_GPL(pm_power_off);
129void machine_halt(void) 131void machine_halt(void)
130{ 132{
131 machine_shutdown(); 133 machine_shutdown();
132 ppc_md.halt(); 134 if (ppc_md.halt)
135 ppc_md.halt();
133#ifdef CONFIG_SMP 136#ifdef CONFIG_SMP
134 smp_send_stop(); 137 smp_send_stop();
135#endif 138#endif
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 56f50e91bddb..c4a294d657b9 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -431,7 +431,7 @@ void timer_interrupt(struct pt_regs * regs)
431 profile_tick(CPU_PROFILING, regs); 431 profile_tick(CPU_PROFILING, regs);
432 432
433#ifdef CONFIG_PPC_ISERIES 433#ifdef CONFIG_PPC_ISERIES
434 get_paca()->lppaca.int_dword.fields.decr_int = 0; 434 get_lppaca()->int_dword.fields.decr_int = 0;
435#endif 435#endif
436 436
437 while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu))) 437 while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 13c41495fe06..13c655ba2841 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -76,7 +76,7 @@ static void vio_bus_shutdown(struct device *dev)
76 struct vio_dev *viodev = to_vio_dev(dev); 76 struct vio_dev *viodev = to_vio_dev(dev);
77 struct vio_driver *viodrv = to_vio_driver(dev->driver); 77 struct vio_driver *viodrv = to_vio_driver(dev->driver);
78 78
79 if (viodrv->shutdown) 79 if (dev->driver && viodrv->shutdown)
80 viodrv->shutdown(viodev); 80 viodrv->shutdown(viodev);
81} 81}
82 82
@@ -91,9 +91,6 @@ int vio_register_driver(struct vio_driver *viodrv)
91 91
92 /* fill in 'struct driver' fields */ 92 /* fill in 'struct driver' fields */
93 viodrv->driver.bus = &vio_bus_type; 93 viodrv->driver.bus = &vio_bus_type;
94 viodrv->driver.probe = vio_bus_probe;
95 viodrv->driver.remove = vio_bus_remove;
96 viodrv->driver.shutdown = vio_bus_shutdown;
97 94
98 return driver_register(&viodrv->driver); 95 return driver_register(&viodrv->driver);
99} 96}
@@ -295,4 +292,7 @@ struct bus_type vio_bus_type = {
295 .name = "vio", 292 .name = "vio",
296 .uevent = vio_hotplug, 293 .uevent = vio_hotplug,
297 .match = vio_bus_match, 294 .match = vio_bus_match,
295 .probe = vio_bus_probe,
296 .remove = vio_bus_remove,
297 .shutdown = vio_bus_shutdown,
298}; 298};