author		Paul Mackerras <paulus@samba.org>	2008-08-29 21:40:24 -0400
committer	Paul Mackerras <paulus@samba.org>	2008-09-15 14:08:08 -0400
commit		1f6a93e4c35e75d547b51f56ba8139ab1a91628c (patch)
tree		c755528c7f299fa407b3eda77a3e08af78b0b25c /arch/powerpc/kernel
parent		9a95516740c924675d52c472d7d170c62eab176c (diff)
powerpc: Make it possible to move the interrupt handlers away from the kernel
This changes the way that the exception prologs transfer control to
the handlers in 64-bit kernels with the aim of making it possible to
have the prologs separate from the main body of the kernel. Now,
instead of computing the address of the handler by taking the top
32 bits of the paca address (to get the 0xc0000000........ part) and
ORing in something in the bottom 16 bits, we get the base address of
the kernel by doing a load from the paca and adding an offset.
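As a rough C sketch of the two computations (illustration only; kernelbase mirrors the new PACAKBASE field in the patch below, everything else here is a made-up stand-in):

/* Illustrative model, not kernel code. */
#include <stdio.h>

struct paca_sketch {
	unsigned long kernelbase;	/* new PACAKBASE field */
};

/* Old scheme: keep the top 32 bits of the paca's own address (the
 * 0xc0000000... part) and OR in the low 16 bits of the handler label. */
unsigned long handler_addr_old(unsigned long paca_addr, unsigned long handler_label)
{
	return (paca_addr & ~0xffffffffUL) | (handler_label & 0xffffUL);
}

/* New scheme: one load from the paca plus an offset into the image. */
unsigned long handler_addr_new(const struct paca_sketch *paca, unsigned long handler_offset)
{
	return paca->kernelbase + handler_offset;
}

int main(void)
{
	struct paca_sketch paca = { .kernelbase = 0xc000000000000000UL };

	/* made-up example values */
	printf("old: %#lx\n", handler_addr_old(0xc000000000fd0000UL, 0x4380UL));
	printf("new: %#lx\n", handler_addr_new(&paca, 0x4380UL));
	return 0;
}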
This also replaces an mfmsr and an ori to compute the MSR value for
the handler with a load from the paca. That makes it unnecessary to
have a separate version of EXCEPTION_PROLOG_PSERIES that forces 64-bit
mode.
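The MSR side, in the same illustrative style (the MSR_*_SKETCH bit values below are placeholders, not the real PowerPC bit positions; kernel_msr mirrors the new PACAKMSR field):

/* Illustrative model, not kernel code. */
#define MSR_IR_SKETCH	0x20UL	/* instruction relocation */
#define MSR_DR_SKETCH	0x10UL	/* data relocation */
#define MSR_RI_SKETCH	0x02UL	/* recoverable interrupt */

struct paca_msr_sketch {
	unsigned long kernel_msr;	/* new PACAKMSR field, set to MSR_KERNEL at boot */
};

/* Old: the prolog did mfmsr and ORed in IR/DR/RI at exception time;
 * since that keeps the rest of the current MSR, forcing 64-bit mode
 * needed a separate EXCEPTION_PROLOG_PSERIES variant. */
unsigned long handler_msr_old(unsigned long current_msr)
{
	return current_msr | MSR_IR_SKETCH | MSR_DR_SKETCH | MSR_RI_SKETCH;
}

/* New: the full value is computed once per CPU when the paca is set up,
 * so the prolog is a single load. */
unsigned long handler_msr_new(const struct paca_msr_sketch *paca)
{
	return paca->kernel_msr;
}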
We can no longer use direct branches in the exception prolog code,
which means that the SLB miss handlers can't branch directly to
.slb_miss_realmode any more. Instead we have to compute the address
and do an indirect branch. This is conditional on CONFIG_RELOCATABLE;
for non-relocatable kernels we use a direct branch as before. (A later
change will allow CONFIG_RELOCATABLE to be set on 64-bit powerpc.)
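In C terms the new SLB-miss dispatch has roughly this shape (sketch only; CONFIG_RELOCATABLE_SKETCH and every name here are illustrative stand-ins, the real code is the assembly added in head_64.S below):

/* Illustrative model, not kernel code. */
void slb_miss_realmode_sketch(void)
{
	/* stand-in for .slb_miss_realmode */
}

void slb_miss_dispatch_sketch(unsigned long kernelbase, unsigned long handler_offset)
{
#ifndef CONFIG_RELOCATABLE_SKETCH
	/* the link-time distance to the handler is fixed, so a direct
	 * branch still works, even in real mode */
	slb_miss_realmode_sketch();
#else
	/* the distance depends on where the kernel was loaded, so the
	 * address is computed and the branch goes through a register
	 * (mfctr/mtctr/bctr in the patch) */
	void (*handler)(void) = (void (*)(void))(kernelbase + handler_offset);
	handler();
#endif
}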
Since the secondary CPUs on pSeries start execution in the first 0x100
bytes of real memory and then have to get to wherever the kernel is,
we can't use a direct branch to get there. Instead this changes
__secondary_hold_spinloop from a flag to a function pointer. When it
is set to a non-NULL value, the secondary CPUs jump to the function
pointed to by that value.
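A C model of the new hand-off, both sides together (the secondary side is really the assembly loop at __secondary_hold; the names below are illustrative):

/* Illustrative model, not kernel code. */
#include <stdint.h>

/* Stands in for the .llong at __secondary_hold_spinloop, which the
 * kernel accesses at its real (physical) address. */
volatile uint64_t secondary_hold_spinloop_sketch;

/* Primary CPU, smp_release_cpus(): publish the real address of the
 * function descriptor of generic_secondary_smp_init(). */
void release_secondaries_sketch(uint64_t descriptor_real_addr)
{
	secondary_hold_spinloop_sketch = descriptor_real_addr;	/* was: *ptr = 1 */
	/* mb() in the real code */
}

/* Each secondary CPU, spinning in __secondary_hold: wait for a
 * non-zero value, dereference the descriptor to get the entry point,
 * then jump to it (mtctr r4; bctr in the patch). */
uint64_t secondary_entry_sketch(void)
{
	uint64_t desc;

	while ((desc = secondary_hold_spinloop_sketch) == 0)
		;					/* 100: ... beq 100b */
	return *(uint64_t *)(uintptr_t)desc;		/* ld r4,0(r4) */
}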
Finally this eliminates one code difference between 32-bit and 64-bit
by making __secondary_hold be the text address of the secondary CPU
spinloop rather than a function descriptor for it.
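The visible effect is in the prom_init.c hunk below; the before and after declarations side by side (taken from the patch, not meant to compile as one unit):

/* 64-bit before: the symbol named a function descriptor, so the text
 * address had to be fished out of the descriptor: */
extern void __secondary_hold(void);
unsigned long secondary_hold =
	__pa(*PTRRELOC((unsigned long *)__secondary_hold));

/* After, on both 32-bit and 64-bit: __secondary_hold is a plain .globl
 * label, so its address is the text address itself: */
extern char __secondary_hold;
unsigned long secondary_hold = LOW_ADDR(__secondary_hold);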
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	2
-rw-r--r--	arch/powerpc/kernel/head_64.S	71
-rw-r--r--	arch/powerpc/kernel/paca.c	2
-rw-r--r--	arch/powerpc/kernel/prom_init.c	8
-rw-r--r--	arch/powerpc/kernel/setup_64.c	9
5 files changed, 65 insertions, 27 deletions
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 92768d3006f7..e9c4044012bd 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -122,6 +122,8 @@ int main(void)
 	DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
 	DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
 	DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
+	DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase));
+	DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
 	DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
 	DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
 	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 27935d1ab6a1..97bb6e6f67b1 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -82,7 +82,11 @@ END_FTR_SECTION(0, 1)
 	/* Catch branch to 0 in real mode */
 	trap
 
-	/* Secondary processors spin on this value until it goes to 1. */
+	/* Secondary processors spin on this value until it becomes nonzero.
+	 * When it does it contains the real address of the descriptor
+	 * of the function that the cpu should jump to to continue
+	 * initialization.
+	 */
 	.globl	__secondary_hold_spinloop
 __secondary_hold_spinloop:
 	.llong	0x0
@@ -109,8 +113,11 @@ __secondary_hold_acknowledge:
  * before the bulk of the kernel has been relocated.  This code
  * is relocated to physical address 0x60 before prom_init is run.
  * All of it must fit below the first exception vector at 0x100.
+ * Use .globl here not _GLOBAL because we want __secondary_hold
+ * to be the actual text address, not a descriptor.
  */
-_GLOBAL(__secondary_hold)
+	.globl	__secondary_hold
+__secondary_hold:
 	mfmsr	r24
 	ori	r24,r24,MSR_RI
 	mtmsrd	r24			/* RI on */
@@ -126,11 +133,11 @@ _GLOBAL(__secondary_hold)
 
 	/* All secondary cpus wait here until told to start. */
 100:	ld	r4,__secondary_hold_spinloop@l(0)
-	cmpdi	0,r4,1
-	bne	100b
+	cmpdi	0,r4,0
+	beq	100b
 
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
-	LOAD_REG_IMMEDIATE(r4, .generic_secondary_smp_init)
+	ld	r4,0(r4)		/* deref function descriptor */
 	mtctr	r4
 	mr	r3,r24
 	bctr
@@ -147,6 +154,10 @@ exception_marker:
 /*
  * This is the start of the interrupt handlers for pSeries
  * This code runs with relocation off.
+ * Code from here to __end_interrupts gets copied down to real
+ * address 0x100 when we are running a relocatable kernel.
+ * Therefore any relative branches in this section must only
+ * branch to labels in this section.
  */
 	. = 0x100
 	.globl __start_interrupts
@@ -200,7 +211,20 @@ data_access_slb_pSeries:
 	mfspr	r10,SPRN_SPRG1
 	std	r10,PACA_EXSLB+EX_R13(r13)
 	mfspr	r12,SPRN_SRR1		/* and SRR1 */
-	b	.slb_miss_realmode	/* Rel. branch works in real mode */
+#ifndef CONFIG_RELOCATABLE
+	b	.slb_miss_realmode
+#else
+	/*
+	 * We can't just use a direct branch to .slb_miss_realmode
+	 * because the distance from here to there depends on where
+	 * the kernel ends up being put.
+	 */
+	mfctr	r11
+	ld	r10,PACAKBASE(r13)
+	LOAD_HANDLER(r10, .slb_miss_realmode)
+	mtctr	r10
+	bctr
+#endif
 
 	STD_EXCEPTION_PSERIES(0x400, instruction_access)
 
@@ -225,7 +249,15 @@ instruction_access_slb_pSeries:
 	mfspr	r10,SPRN_SPRG1
 	std	r10,PACA_EXSLB+EX_R13(r13)
 	mfspr	r12,SPRN_SRR1		/* and SRR1 */
-	b	.slb_miss_realmode	/* Rel. branch works in real mode */
+#ifndef CONFIG_RELOCATABLE
+	b	.slb_miss_realmode
+#else
+	mfctr	r11
+	ld	r10,PACAKBASE(r13)
+	LOAD_HANDLER(r10, .slb_miss_realmode)
+	mtctr	r10
+	bctr
+#endif
 
 	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
 	STD_EXCEPTION_PSERIES(0x600, alignment)
@@ -244,14 +276,12 @@ BEGIN_FTR_SECTION
 	beq-	1f
 END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
 	mr	r9,r13
-	mfmsr	r10
 	mfspr	r13,SPRN_SPRG3
 	mfspr	r11,SPRN_SRR0
-	clrrdi	r12,r13,32
-	oris	r12,r12,system_call_common@h
-	ori	r12,r12,system_call_common@l
+	ld	r12,PACAKBASE(r13)
+	ld	r10,PACAKMSR(r13)
+	LOAD_HANDLER(r12, system_call_entry)
 	mtspr	SPRN_SRR0,r12
-	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
 	mfspr	r12,SPRN_SRR1
 	mtspr	SPRN_SRR1,r10
 	rfid
@@ -379,7 +409,10 @@ __end_interrupts:
 
 /*
  * Code from here down to __end_handlers is invoked from the
- * exception prologs above.
+ * exception prologs above.  Because the prologs assemble the
+ * addresses of these handlers using the LOAD_HANDLER macro,
+ * which uses an addi instruction, these handlers must be in
+ * the first 32k of the kernel image.
  */
 
 /*** Common interrupt handlers ***/
@@ -419,6 +452,10 @@ machine_check_common:
 	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
 #endif /* CONFIG_CBE_RAS */
 
+	.align	7
+system_call_entry:
+	b	system_call_common
+
 /*
  * Here we have detected that the kernel stack pointer is bad.
  * R9 contains the saved CR, r13 points to the paca,
@@ -562,6 +599,9 @@ unrecov_user_slb:
  */
 _GLOBAL(slb_miss_realmode)
 	mflr	r10
+#ifdef CONFIG_RELOCATABLE
+	mtctr	r11
+#endif
 
 	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
 	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
@@ -612,11 +652,10 @@ BEGIN_FW_FTR_SECTION
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif /* CONFIG_PPC_ISERIES */
 	mfspr	r11,SPRN_SRR0
-	clrrdi	r10,r13,32
+	ld	r10,PACAKBASE(r13)
 	LOAD_HANDLER(r10,unrecov_slb)
 	mtspr	SPRN_SRR0,r10
-	mfmsr	r10
-	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
+	ld	r10,PACAKMSR(r13)
 	mtspr	SPRN_SRR1,r10
 	rfid
 	b	.
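The comment added above ties the handlers to the first 32k of the image because LOAD_HANDLER is built around an addi, whose immediate is a signed 16-bit field; the macro itself lives outside arch/powerpc/kernel, so the check below is an inference from that comment, not a quote of the macro:

/* Illustrative check: can a signed 16-bit addi immediate encode this
 * handler's offset from the start of the kernel image? */
int handler_offset_fits(long offset)
{
	return offset >= -32768 && offset < 32768;	/* 2^15 = 32k */
}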
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index c9bf17eec31b..623e8c3c57f9 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -79,6 +79,8 @@ void __init initialise_pacas(void)
 		new_paca->lock_token = 0x8000;
 		new_paca->paca_index = cpu;
 		new_paca->kernel_toc = kernel_toc;
+		new_paca->kernelbase = KERNELBASE;
+		new_paca->kernel_msr = MSR_KERNEL;
 		new_paca->hw_cpu_id = 0xffff;
 		new_paca->slb_shadow_ptr = &slb_shadow[cpu];
 		new_paca->__current = &init_task;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index b72849ac7db3..1f8988585054 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1321,7 +1321,7 @@ static void __init prom_initialize_tce_table(void)
  *
  * -- Cort
  */
-extern void __secondary_hold(void);
+extern char __secondary_hold;
 extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 
@@ -1342,13 +1342,7 @@ static void __init prom_hold_cpus(void)
 		= (void *) LOW_ADDR(__secondary_hold_spinloop);
 	unsigned long *acknowledge
 		= (void *) LOW_ADDR(__secondary_hold_acknowledge);
-#ifdef CONFIG_PPC64
-	/* __secondary_hold is actually a descriptor, not the text address */
-	unsigned long secondary_hold
-		= __pa(*PTRRELOC((unsigned long *)__secondary_hold));
-#else
 	unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
-#endif
 
 	prom_debug("prom_hold_cpus: start...\n");
 	prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 8b25f51f03bf..843c0af210d0 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -255,9 +255,11 @@ void early_setup_secondary(void)
 #endif /* CONFIG_SMP */
 
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
+extern unsigned long __secondary_hold_spinloop;
+extern void generic_secondary_smp_init(void);
+
 void smp_release_cpus(void)
 {
-	extern unsigned long __secondary_hold_spinloop;
 	unsigned long *ptr;
 
 	DBG(" -> smp_release_cpus()\n");
@@ -266,12 +268,11 @@ void smp_release_cpus(void)
 	 * all now so they can start to spin on their individual paca
 	 * spinloops. For non SMP kernels, the secondary cpus never get out
 	 * of the common spinloop.
-	 * This is useless but harmless on iSeries, secondaries are already
-	 * waiting on their paca spinloops. */
+	 */
 
 	ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
 			- PHYSICAL_START);
-	*ptr = 1;
+	*ptr = __pa(generic_secondary_smp_init);
 	mb();
 
 	DBG(" <- smp_release_cpus()\n");