-rw-r--r--   arch/powerpc/include/asm/exception.h |  42
-rw-r--r--   arch/powerpc/include/asm/paca.h      |   2
-rw-r--r--   arch/powerpc/kernel/asm-offsets.c    |   2
-rw-r--r--   arch/powerpc/kernel/head_64.S        |  71
-rw-r--r--   arch/powerpc/kernel/paca.c           |   2
-rw-r--r--   arch/powerpc/kernel/prom_init.c      |   8
-rw-r--r--   arch/powerpc/kernel/setup_64.c       |   9
7 files changed, 72 insertions, 64 deletions
diff --git a/arch/powerpc/include/asm/exception.h b/arch/powerpc/include/asm/exception.h
index 329148b5acc..d3d4534e3c7 100644
--- a/arch/powerpc/include/asm/exception.h
+++ b/arch/powerpc/include/asm/exception.h
@@ -53,14 +53,8 @@
  * low halfword of the address, but for Kdump we need the whole low
  * word.
  */
-#ifdef CONFIG_CRASH_DUMP
 #define LOAD_HANDLER(reg, label)					\
-	oris	reg,reg,(label)@h;	/* virt addr of handler ... */	\
-	ori	reg,reg,(label)@l;	/* .. and the rest */
-#else
-#define LOAD_HANDLER(reg, label)					\
-	ori	reg,reg,(label)@l;	/* virt addr of handler ... */
-#endif
+	addi	reg,reg,(label)-_stext;	/* virt addr of handler ... */
 
 #define EXCEPTION_PROLOG_1(area)					\
 	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
@@ -72,37 +66,12 @@
 	std	r9,area+EX_R13(r13);					\
 	mfcr	r9
 
-/*
- * Equal to EXCEPTION_PROLOG_PSERIES, except that it forces 64bit mode.
- * The firmware calls the registered system_reset_fwnmi and
- * machine_check_fwnmi handlers in 32bit mode if the cpu happens to run
- * a 32bit application at the time of the event.
- * This firmware bug is present on POWER4 and JS20.
- */
-#define EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(area, label)		\
-	EXCEPTION_PROLOG_1(area);					\
-	clrrdi	r12,r13,32;		/* get high part of &label */	\
-	mfmsr	r10;							\
-	/* force 64bit mode */						\
-	li	r11,5;			/* MSR_SF_LG|MSR_ISF_LG */	\
-	rldimi	r10,r11,61,0;		/* insert into top 3 bits */	\
-	/* done 64bit mode */						\
-	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
-	LOAD_HANDLER(r12,label)						\
-	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
-	mtspr	SPRN_SRR0,r12;						\
-	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
-	mtspr	SPRN_SRR1,r10;						\
-	rfid;								\
-	b	.	/* prevent speculative execution */
-
 #define EXCEPTION_PROLOG_PSERIES(area, label)				\
 	EXCEPTION_PROLOG_1(area);					\
-	clrrdi	r12,r13,32;		/* get high part of &label */	\
-	mfmsr	r10;							\
+	ld	r12,PACAKBASE(r13);	/* get high part of &label */	\
+	ld	r10,PACAKMSR(r13);	/* get MSR value for kernel */	\
 	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
 	LOAD_HANDLER(r12,label)						\
-	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
 	mtspr	SPRN_SRR0,r12;						\
 	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
 	mtspr	SPRN_SRR1,r10;						\
@@ -210,11 +179,10 @@ label##_pSeries:					\
 	std	r10,PACA_EXGEN+EX_R13(r13);				\
 	std	r11,PACA_EXGEN+EX_R11(r13);				\
 	std	r12,PACA_EXGEN+EX_R12(r13);				\
-	clrrdi	r12,r13,32;		/* get high part of &label */	\
-	mfmsr	r10;							\
+	ld	r12,PACAKBASE(r13);	/* get high part of &label */	\
+	ld	r10,PACAKMSR(r13);	/* get MSR value for kernel */	\
 	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
 	LOAD_HANDLER(r12,label##_common)				\
-	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
 	mtspr	SPRN_SRR0,r12;						\
 	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
 	mtspr	SPRN_SRR1,r10;						\
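With this change LOAD_HANDLER no longer embeds an absolute handler address: it adds the handler's offset from _stext to the kernel base fetched from the PACA (PACAKBASE), and the signed 16-bit immediate of addi is what limits these handlers to the first 32KB of the image. A minimal C sketch of that arithmetic, using made-up names and example values (load_handler, handler_off) rather than kernel symbols:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Illustrative only: mimics what EXCEPTION_PROLOG_PSERIES now does.
     * "ld r12,PACAKBASE(r13)" loads the kernel base from the PACA, and
     * "addi r12,r12,(label)-_stext" adds the handler's offset from _stext.
     * addi takes a signed 16-bit immediate, so the handler must lie within
     * the first 32KB of the kernel image.
     */
    static uint64_t load_handler(uint64_t kernelbase, uint64_t handler_off)
    {
        assert(handler_off < 0x8000);   /* fits in addi's immediate */
        return kernelbase + handler_off;
    }

    int main(void)
    {
        /* hypothetical values: kernel linked at 0xc000000000000000,
         * handler 0x1200 bytes past _stext */
        printf("SRR0 = 0x%llx\n",
               (unsigned long long)load_handler(0xc000000000000000ULL, 0x1200));
        return 0;
    }

Fetching both the base address and the kernel MSR from the PACA keeps the prolog free of mfmsr and of link-time absolute addresses, which is what allows the vectors to be copied down to real address 0x100 independently of where the kernel image itself ends up.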
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 6493a395508..082b3aedf14 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -62,6 +62,8 @@ struct paca_struct {
 	u16 paca_index;			/* Logical processor number */
 
 	u64 kernel_toc;			/* Kernel TOC address */
+	u64 kernelbase;			/* Base address of kernel */
+	u64 kernel_msr;			/* MSR while running in kernel */
 	u64 stab_real;			/* Absolute address of segment table */
 	u64 stab_addr;			/* Virtual address of segment table */
 	void *emergency_sp;		/* pointer to emergency stack */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 92768d3006f..e9c4044012b 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -122,6 +122,8 @@ int main(void)
 	DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
 	DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
 	DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
+	DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase));
+	DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
 	DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
 	DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
 	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
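PACAKBASE and PACAKMSR are not visible to the assembler by themselves; asm-offsets.c turns structure offsets into build-time assembler constants so head_64.S can use them as load displacements such as "ld r12,PACAKBASE(r13)". A small C sketch of the idea, with a cut-down stand-in structure whose layout is illustrative only:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Cut-down stand-in for struct paca_struct; the field order here is
     * illustrative and does not match the real kernel layout. */
    struct paca_stub {
        uint64_t kernel_toc;
        uint64_t kernelbase;
        uint64_t kernel_msr;
    };

    int main(void)
    {
        /* asm-offsets.c's DEFINE() ends up emitting lines like
         * "#define PACAKBASE <offset>" into a generated header, which the
         * assembly then uses as a displacement off the paca pointer (r13). */
        printf("PACAKBASE -> %zu\n", offsetof(struct paca_stub, kernelbase));
        printf("PACAKMSR  -> %zu\n", offsetof(struct paca_stub, kernel_msr));
        return 0;
    }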
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 27935d1ab6a..97bb6e6f67b 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -82,7 +82,11 @@ END_FTR_SECTION(0, 1)
 	/* Catch branch to 0 in real mode */
 	trap
 
-	/* Secondary processors spin on this value until it goes to 1. */
+	/* Secondary processors spin on this value until it becomes nonzero.
+	 * When it does it contains the real address of the descriptor
+	 * of the function that the cpu should jump to to continue
+	 * initialization.
+	 */
 	.globl  __secondary_hold_spinloop
 __secondary_hold_spinloop:
 	.llong	0x0
@@ -109,8 +113,11 @@ __secondary_hold_acknowledge:
  * before the bulk of the kernel has been relocated.  This code
  * is relocated to physical address 0x60 before prom_init is run.
  * All of it must fit below the first exception vector at 0x100.
+ * Use .globl here not _GLOBAL because we want __secondary_hold
+ * to be the actual text address, not a descriptor.
  */
-_GLOBAL(__secondary_hold)
+	.globl	__secondary_hold
+__secondary_hold:
 	mfmsr	r24
 	ori	r24,r24,MSR_RI
 	mtmsrd	r24			/* RI on */
@@ -126,11 +133,11 @@ _GLOBAL(__secondary_hold)
 
 	/* All secondary cpus wait here until told to start. */
 100:	ld	r4,__secondary_hold_spinloop@l(0)
-	cmpdi	0,r4,1
-	bne	100b
+	cmpdi	0,r4,0
+	beq	100b
 
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
-	LOAD_REG_IMMEDIATE(r4, .generic_secondary_smp_init)
+	ld	r4,0(r4)		/* deref function descriptor */
 	mtctr	r4
 	mr	r3,r24
 	bctr
@@ -147,6 +154,10 @@ exception_marker:
 /*
  * This is the start of the interrupt handlers for pSeries
  * This code runs with relocation off.
+ * Code from here to __end_interrupts gets copied down to real
+ * address 0x100 when we are running a relocatable kernel.
+ * Therefore any relative branches in this section must only
+ * branch to labels in this section.
  */
 	. = 0x100
 	.globl __start_interrupts
@@ -200,7 +211,20 @@ data_access_slb_pSeries:
 	mfspr	r10,SPRN_SPRG1
 	std	r10,PACA_EXSLB+EX_R13(r13)
 	mfspr	r12,SPRN_SRR1		/* and SRR1 */
-	b	.slb_miss_realmode	/* Rel. branch works in real mode */
+#ifndef CONFIG_RELOCATABLE
+	b	.slb_miss_realmode
+#else
+	/*
+	 * We can't just use a direct branch to .slb_miss_realmode
+	 * because the distance from here to there depends on where
+	 * the kernel ends up being put.
+	 */
+	mfctr	r11
+	ld	r10,PACAKBASE(r13)
+	LOAD_HANDLER(r10, .slb_miss_realmode)
+	mtctr	r10
+	bctr
+#endif
 
 	STD_EXCEPTION_PSERIES(0x400, instruction_access)
 
@@ -225,7 +249,15 @@ instruction_access_slb_pSeries:
 	mfspr	r10,SPRN_SPRG1
 	std	r10,PACA_EXSLB+EX_R13(r13)
 	mfspr	r12,SPRN_SRR1		/* and SRR1 */
-	b	.slb_miss_realmode	/* Rel. branch works in real mode */
+#ifndef CONFIG_RELOCATABLE
+	b	.slb_miss_realmode
+#else
+	mfctr	r11
+	ld	r10,PACAKBASE(r13)
+	LOAD_HANDLER(r10, .slb_miss_realmode)
+	mtctr	r10
+	bctr
+#endif
 
 	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
 	STD_EXCEPTION_PSERIES(0x600, alignment)
@@ -244,14 +276,12 @@ BEGIN_FTR_SECTION
 	beq-	1f
 END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
 	mr	r9,r13
-	mfmsr	r10
 	mfspr	r13,SPRN_SPRG3
 	mfspr	r11,SPRN_SRR0
-	clrrdi	r12,r13,32
-	oris	r12,r12,system_call_common@h
-	ori	r12,r12,system_call_common@l
+	ld	r12,PACAKBASE(r13)
+	ld	r10,PACAKMSR(r13)
+	LOAD_HANDLER(r12, system_call_entry)
 	mtspr	SPRN_SRR0,r12
-	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
 	mfspr	r12,SPRN_SRR1
 	mtspr	SPRN_SRR1,r10
 	rfid
@@ -379,7 +409,10 @@ __end_interrupts:
 
 /*
  * Code from here down to __end_handlers is invoked from the
- * exception prologs above.
+ * exception prologs above.  Because the prologs assemble the
+ * addresses of these handlers using the LOAD_HANDLER macro,
+ * which uses an addi instruction, these handlers must be in
+ * the first 32k of the kernel image.
  */
 
 /*** Common interrupt handlers ***/
@@ -419,6 +452,10 @@ machine_check_common:
 	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
 #endif /* CONFIG_CBE_RAS */
 
+	.align	7
+system_call_entry:
+	b	system_call_common
+
 /*
  * Here we have detected that the kernel stack pointer is bad.
  * R9 contains the saved CR, r13 points to the paca,
@@ -562,6 +599,9 @@ unrecov_user_slb:
  */
 _GLOBAL(slb_miss_realmode)
 	mflr	r10
+#ifdef CONFIG_RELOCATABLE
+	mtctr	r11
+#endif
 
 	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
 	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
@@ -612,11 +652,10 @@ BEGIN_FW_FTR_SECTION
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif /* CONFIG_PPC_ISERIES */
 	mfspr	r11,SPRN_SRR0
-	clrrdi	r10,r13,32
+	ld	r10,PACAKBASE(r13)
 	LOAD_HANDLER(r10,unrecov_slb)
 	mtspr	SPRN_SRR0,r10
-	mfmsr	r10
-	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
+	ld	r10,PACAKMSR(r13)
 	mtspr	SPRN_SRR1,r10
 	rfid
 	b	.
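The head_64.S changes above also alter the secondary-CPU handshake: instead of waiting for the literal value 1, a secondary now spins until __secondary_hold_spinloop holds the real address of a function descriptor, then loads the first doubleword of that descriptor to find the entry point. A self-contained C model of that protocol, with stand-in names (spinloop, secondary_entry) and an ordinary C function playing the role of generic_secondary_smp_init:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative model of the __secondary_hold spin protocol; every name
     * and type here is a stand-in, not a kernel declaration. */

    struct func_desc {                  /* 64-bit ELFv1 function descriptor */
        uint64_t entry;                 /* text address of the function */
        uint64_t toc;
        uint64_t env;
    };

    static volatile uint64_t spinloop;  /* models __secondary_hold_spinloop */

    static void secondary_entry(uint64_t cpu)   /* models generic_secondary_smp_init */
    {
        printf("secondary cpu %llu released\n", (unsigned long long)cpu);
    }

    static void secondary_hold(uint64_t cpu)
    {
        uint64_t desc_addr;
        const struct func_desc *desc;
        void (*entry)(uint64_t);

        /* Spin until the boot cpu publishes a descriptor address; the old
         * code instead waited for the literal value 1. */
        while ((desc_addr = spinloop) == 0)
            ;

        /* "ld r4,0(r4)" in head_64.S: the first doubleword of the
         * descriptor is the entry address to branch to. */
        desc = (const struct func_desc *)(uintptr_t)desc_addr;
        entry = (void (*)(uint64_t))(uintptr_t)desc->entry;
        entry(cpu);
    }

    int main(void)
    {
        /* Stand-in for smp_release_cpus(): publish the descriptor. */
        struct func_desc desc = { (uint64_t)(uintptr_t)secondary_entry, 0, 0 };

        spinloop = (uint64_t)(uintptr_t)&desc;
        secondary_hold(1);
        return 0;
    }

Publishing a descriptor address rather than a magic value is what removes the LOAD_REG_IMMEDIATE of an absolute kernel address from the spin loop, so the same hold code works wherever the kernel is loaded.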
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index c9bf17eec31..623e8c3c57f 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -79,6 +79,8 @@ void __init initialise_pacas(void)
 		new_paca->lock_token = 0x8000;
 		new_paca->paca_index = cpu;
 		new_paca->kernel_toc = kernel_toc;
+		new_paca->kernelbase = KERNELBASE;
+		new_paca->kernel_msr = MSR_KERNEL;
 		new_paca->hw_cpu_id = 0xffff;
 		new_paca->slb_shadow_ptr = &slb_shadow[cpu];
 		new_paca->__current = &init_task;
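Caching MSR_KERNEL in the new kernel_msr field is what lets the exception prologs replace the mfmsr/ori pair with a single ld from the PACA. A rough sketch of the difference, using the architected SF/IR/DR/RI bit positions but treating the composed value only as an illustrative stand-in for MSR_KERNEL, not its exact definition:

    #include <stdint.h>
    #include <stdio.h>

    /* Bit positions for MSR[SF], MSR[IR], MSR[DR], MSR[RI] on 64-bit
     * PowerPC; treat the composed value below as an example constant. */
    #define MSR_SF  (1ULL << 63)
    #define MSR_IR  (1ULL << 5)
    #define MSR_DR  (1ULL << 4)
    #define MSR_RI  (1ULL << 1)

    int main(void)
    {
        uint64_t current_msr = MSR_SF;  /* pretend value read by mfmsr */

        /* Old prolog: derive the MSR for the handler from whatever the
         * interrupted context was running with (mfmsr + ori). */
        uint64_t old_way = current_msr | MSR_IR | MSR_DR | MSR_RI;

        /* New prolog: one load of a value computed once at boot and kept
         * in the paca as kernel_msr; no mfmsr in the critical path. */
        uint64_t kernel_msr = MSR_SF | MSR_IR | MSR_DR | MSR_RI;  /* simplified stand-in */

        printf("old 0x%016llx  new 0x%016llx\n",
               (unsigned long long)old_way, (unsigned long long)kernel_msr);
        return 0;
    }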
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index b72849ac7db..1f898858505 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1321,7 +1321,7 @@ static void __init prom_initialize_tce_table(void)
  *
  * -- Cort
  */
-extern void __secondary_hold(void);
+extern char __secondary_hold;
 extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 
@@ -1342,13 +1342,7 @@
 		= (void *) LOW_ADDR(__secondary_hold_spinloop);
 	unsigned long *acknowledge
 		= (void *) LOW_ADDR(__secondary_hold_acknowledge);
-#ifdef CONFIG_PPC64
-	/* __secondary_hold is actually a descriptor, not the text address */
-	unsigned long secondary_hold
-		= __pa(*PTRRELOC((unsigned long *)__secondary_hold));
-#else
 	unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
-#endif
 
 	prom_debug("prom_hold_cpus: start...\n");
 	prom_debug("    1) spinloop       = 0x%x\n", (unsigned long)spinloop);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 8b25f51f03b..843c0af210d 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -255,9 +255,11 @@ void early_setup_secondary(void)
 #endif /* CONFIG_SMP */
 
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
+extern unsigned long __secondary_hold_spinloop;
+extern void generic_secondary_smp_init(void);
+
 void smp_release_cpus(void)
 {
-	extern unsigned long __secondary_hold_spinloop;
 	unsigned long *ptr;
 
 	DBG(" -> smp_release_cpus()\n");
@@ -266,12 +268,11 @@ void smp_release_cpus(void)
 	 * all now so they can start to spin on their individual paca
 	 * spinloops. For non SMP kernels, the secondary cpus never get out
 	 * of the common spinloop.
-	 * This is useless but harmless on iSeries, secondaries are already
-	 * waiting on their paca spinloops. */
+	 */
 
 	ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
 			- PHYSICAL_START);
-	*ptr = 1;
+	*ptr = __pa(generic_secondary_smp_init);
 	mb();
 
 	DBG(" <- smp_release_cpus()\n");