26 files changed, 271 insertions, 271 deletions
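Every hunk below makes the same mechanical change: branches that named the ELFv1-style dot symbol of a function (e.g. bl .save_nvgprs) now branch to the plain symbol (bl save_nvgprs), and the one purely file-local label gains a .L prefix (.udelay_not_601 becomes .Ludelay_not_601) so the assembler keeps it out of the symbol table. A minimal sketch of the two forms, using hypothetical symbols (example_caller and my_c_helper are illustrative only, not taken from the patch):

_GLOBAL(example_caller)
	/* old form branched to the dot symbol naming the code entry point:
	 *	b	.my_c_helper
	 * new form branches to the plain symbol; the toolchain is expected to
	 * resolve it to the function's entry point either way. */
	b	my_c_helper

	/* a .L prefix marks an assembler-local label, cf. .Ludelay_not_601 */
.Lexample_spin:
	b	.Lexample_spin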
diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S
index 6636b1d7821b..243b8497d58b 100644
--- a/arch/powerpc/boot/util.S
+++ b/arch/powerpc/boot/util.S
@@ -45,7 +45,7 @@ udelay:
 mfspr r4,SPRN_PVR
 srwi r4,r4,16
 cmpwi 0,r4,1 /* 601 ? */
-bne .udelay_not_601
+bne .Ludelay_not_601
 00: li r0,86 /* Instructions / microsecond? */
 mtctr r0
 10: addi r0,r0,0 /* NOP */
@@ -54,7 +54,7 @@ udelay:
 bne 00b
 blr

-.udelay_not_601:
+.Ludelay_not_601:
 mulli r4,r3,1000 /* nanoseconds */
 /* Change r4 to be the number of ticks using:
 * (nanoseconds + (timebase_period_ns - 1 )) / timebase_period_ns
diff --git a/arch/powerpc/include/asm/context_tracking.h b/arch/powerpc/include/asm/context_tracking.h
index b6f5a33b8ee2..40014921ffff 100644
--- a/arch/powerpc/include/asm/context_tracking.h
+++ b/arch/powerpc/include/asm/context_tracking.h
@@ -2,9 +2,9 @@
 #define _ASM_POWERPC_CONTEXT_TRACKING_H

 #ifdef CONFIG_CONTEXT_TRACKING
-#define SCHEDULE_USER bl .schedule_user
+#define SCHEDULE_USER bl schedule_user
 #else
-#define SCHEDULE_USER bl .schedule
+#define SCHEDULE_USER bl schedule
 #endif

 #endif
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index a563d9afd179..a8b52b61043f 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -174,10 +174,10 @@ exc_##label##_book3e:
 mtlr r16;
 #define TLB_MISS_STATS_D(name) \
 addi r9,r13,MMSTAT_DSTATS+name; \
-bl .tlb_stat_inc;
+bl tlb_stat_inc;
 #define TLB_MISS_STATS_I(name) \
 addi r9,r13,MMSTAT_ISTATS+name; \
-bl .tlb_stat_inc;
+bl tlb_stat_inc;
 #define TLB_MISS_STATS_X(name) \
 ld r8,PACA_EXTLB+EX_TLB_ESR(r13); \
 cmpdi cr2,r8,-1; \
@@ -185,7 +185,7 @@ exc_##label##_book3e:
 addi r9,r13,MMSTAT_DSTATS+name; \
 b 62f; \
 61: addi r9,r13,MMSTAT_ISTATS+name; \
-62: bl .tlb_stat_inc;
+62: bl tlb_stat_inc;
 #define TLB_MISS_STATS_SAVE_INFO \
 std r14,EX_TLB_ESR(r12); /* save ESR */
 #define TLB_MISS_STATS_SAVE_INFO_BOLTED \
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index aeaa56cd9b54..8f35cd7d59cc 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -517,7 +517,7 @@ label##_relon_hv: \
 #define DISABLE_INTS RECONCILE_IRQ_STATE(r10,r11)

 #define ADD_NVGPRS \
-bl .save_nvgprs
+bl save_nvgprs

 #define RUNLATCH_ON \
 BEGIN_FTR_SECTION \
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
index f51a5580bfd0..f62c056e75bf 100644
--- a/arch/powerpc/include/asm/irqflags.h
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -36,8 +36,8 @@
 * have to call a C function so call a wrapper that saves all the
 * C-clobbered registers.
 */
-#define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on)
-#define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off)
+#define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on)
+#define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off)

 /*
 * This is used by assembly code to soft-disable interrupts first and
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 6586a40a46ce..3128ba3ba7a0 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -57,7 +57,7 @@ BEGIN_FW_FTR_SECTION; \
 LDX_BE r10,0,r10; /* get log write index */ \
 cmpd cr1,r11,r10; \
 beq+ cr1,33f; \
-bl .accumulate_stolen_time; \
+bl accumulate_stolen_time; \
 ld r12,_MSR(r1); \
 andi. r10,r12,MSR_PR; /* Restore cr0 (coming from user) */ \
 33: \
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index cc2d8962e090..4f1393d20079 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -94,12 +94,12 @@ _GLOBAL(setup_altivec_idle)
 _GLOBAL(__setup_cpu_e6500)
 mflr r6
 #ifdef CONFIG_PPC64
-bl .setup_altivec_ivors
+bl setup_altivec_ivors
 /* Touch IVOR42 only if the CPU supports E.HV category */
 mfspr r10,SPRN_MMUCFG
 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
 beq 1f
-bl .setup_lrat_ivor
+bl setup_lrat_ivor
 1:
 #endif
 bl setup_pw20_idle
@@ -164,15 +164,15 @@ _GLOBAL(__setup_cpu_e5500)
 #ifdef CONFIG_PPC_BOOK3E_64
 _GLOBAL(__restore_cpu_e6500)
 mflr r5
-bl .setup_altivec_ivors
+bl setup_altivec_ivors
 /* Touch IVOR42 only if the CPU supports E.HV category */
 mfspr r10,SPRN_MMUCFG
 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
 beq 1f
-bl .setup_lrat_ivor
+bl setup_lrat_ivor
 1:
-bl .setup_pw20_idle
-bl .setup_altivec_idle
+bl setup_pw20_idle
+bl setup_altivec_idle
 bl __restore_cpu_e5500
 mtlr r5
 blr
@@ -181,9 +181,9 @@ _GLOBAL(__restore_cpu_e5500)
 mflr r4
 bl __e500_icache_setup
 bl __e500_dcache_setup
-bl .__setup_base_ivors
-bl .setup_perfmon_ivor
-bl .setup_doorbell_ivors
+bl __setup_base_ivors
+bl setup_perfmon_ivor
+bl setup_doorbell_ivors
 /*
 * We only want to touch IVOR38-41 if we're running on hardware
 * that supports category E.HV. The architectural way to determine
@@ -192,7 +192,7 @@ _GLOBAL(__restore_cpu_e5500)
 mfspr r10,SPRN_MMUCFG
 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
 beq 1f
-bl .setup_ehv_ivors
+bl setup_ehv_ivors
 1:
 mtlr r4
 blr
@@ -201,9 +201,9 @@ _GLOBAL(__setup_cpu_e5500)
 mflr r5
 bl __e500_icache_setup
 bl __e500_dcache_setup
-bl .__setup_base_ivors
-bl .setup_perfmon_ivor
-bl .setup_doorbell_ivors
+bl __setup_base_ivors
+bl setup_perfmon_ivor
+bl setup_doorbell_ivors
 /*
 * We only want to touch IVOR38-41 if we're running on hardware
 * that supports category E.HV. The architectural way to determine
@@ -212,7 +212,7 @@ _GLOBAL(__setup_cpu_e5500)
 mfspr r10,SPRN_MMUCFG
 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
 beq 1f
-bl .setup_ehv_ivors
+bl setup_ehv_ivors
 b 2f
 1:
 ld r10,CPU_SPEC_FEATURES(r4)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 662c6dd98072..b629198b072c 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -106,7 +106,7 @@ BEGIN_FW_FTR_SECTION
 LDX_BE r10,0,r10 /* get log write index */
 cmpd cr1,r11,r10
 beq+ cr1,33f
-bl .accumulate_stolen_time
+bl accumulate_stolen_time
 REST_GPR(0,r1)
 REST_4GPRS(3,r1)
 REST_2GPRS(7,r1)
@@ -143,7 +143,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 std r10,SOFTE(r1)

 #ifdef SHOW_SYSCALLS
-bl .do_show_syscall
+bl do_show_syscall
 REST_GPR(0,r1)
 REST_4GPRS(3,r1)
 REST_2GPRS(7,r1)
@@ -181,7 +181,7 @@ system_call: /* label this so stack traces look sane */
 syscall_exit:
 std r3,RESULT(r1)
 #ifdef SHOW_SYSCALLS
-bl .do_show_syscall_exit
+bl do_show_syscall_exit
 ld r3,RESULT(r1)
 #endif
 CURRENT_THREAD_INFO(r12, r1)
@@ -248,9 +248,9 @@ syscall_error:

 /* Traced system call support */
 syscall_dotrace:
-bl .save_nvgprs
+bl save_nvgprs
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .do_syscall_trace_enter
+bl do_syscall_trace_enter
 /*
 * Restore argument registers possibly just changed.
 * We use the return value of do_syscall_trace_enter
@@ -308,7 +308,7 @@ syscall_exit_work:
 4: /* Anything else left to do? */
 SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */
 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
-beq .ret_from_except_lite
+beq ret_from_except_lite

 /* Re-enable interrupts */
 #ifdef CONFIG_PPC_BOOK3E
@@ -319,10 +319,10 @@ syscall_exit_work:
 mtmsrd r10,1
 #endif /* CONFIG_PPC_BOOK3E */

-bl .save_nvgprs
+bl save_nvgprs
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .do_syscall_trace_leave
-b .ret_from_except
+bl do_syscall_trace_leave
+b ret_from_except

 /* Save non-volatile GPRs, if not already saved. */
 _GLOBAL(save_nvgprs)
@@ -345,38 +345,38 @@ _GLOBAL(save_nvgprs)
 */

 _GLOBAL(ppc_fork)
-bl .save_nvgprs
-bl .sys_fork
+bl save_nvgprs
+bl sys_fork
 b syscall_exit

 _GLOBAL(ppc_vfork)
-bl .save_nvgprs
-bl .sys_vfork
+bl save_nvgprs
+bl sys_vfork
 b syscall_exit

 _GLOBAL(ppc_clone)
-bl .save_nvgprs
-bl .sys_clone
+bl save_nvgprs
+bl sys_clone
 b syscall_exit

 _GLOBAL(ppc32_swapcontext)
-bl .save_nvgprs
-bl .compat_sys_swapcontext
+bl save_nvgprs
+bl compat_sys_swapcontext
 b syscall_exit

 _GLOBAL(ppc64_swapcontext)
-bl .save_nvgprs
-bl .sys_swapcontext
+bl save_nvgprs
+bl sys_swapcontext
 b syscall_exit

 _GLOBAL(ret_from_fork)
-bl .schedule_tail
+bl schedule_tail
 REST_NVGPRS(r1)
 li r3,0
 b syscall_exit

 _GLOBAL(ret_from_kernel_thread)
-bl .schedule_tail
+bl schedule_tail
 REST_NVGPRS(r1)
 ld r14, 0(r14)
 mtlr r14
@@ -611,7 +611,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
 _GLOBAL(ret_from_except)
 ld r11,_TRAP(r1)
 andi. r0,r11,1
-bne .ret_from_except_lite
+bne ret_from_except_lite
 REST_NVGPRS(r1)

 _GLOBAL(ret_from_except_lite)
@@ -661,23 +661,23 @@ _GLOBAL(ret_from_except_lite)
 #endif
 1: andi. r0,r4,_TIF_NEED_RESCHED
 beq 2f
-bl .restore_interrupts
+bl restore_interrupts
 SCHEDULE_USER
-b .ret_from_except_lite
+b ret_from_except_lite
 2:
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
 bne 3f /* only restore TM if nothing else to do */
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .restore_tm_state
+bl restore_tm_state
 b restore
 3:
 #endif
-bl .save_nvgprs
-bl .restore_interrupts
+bl save_nvgprs
+bl restore_interrupts
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .do_notify_resume
-b .ret_from_except
+bl do_notify_resume
+b ret_from_except

 resume_kernel:
 /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
@@ -730,7 +730,7 @@ resume_kernel:
 * sure we are soft-disabled first and reconcile irq state.
 */
 RECONCILE_IRQ_STATE(r3,r4)
-1: bl .preempt_schedule_irq
+1: bl preempt_schedule_irq

 /* Re-test flags and eventually loop */
 CURRENT_THREAD_INFO(r9, r1)
@@ -792,7 +792,7 @@ restore_no_replay:
 */
 do_restore:
 #ifdef CONFIG_PPC_BOOK3E
-b .exception_return_book3e
+b exception_return_book3e
 #else
 /*
 * Clear the reservation. If we know the CPU tracks the address of
@@ -907,7 +907,7 @@ restore_check_irq_replay:
 *
 * Still, this might be useful for things like hash_page
 */
-bl .__check_irq_replay
+bl __check_irq_replay
 cmpwi cr0,r3,0
 beq restore_no_replay

@@ -928,13 +928,13 @@ restore_check_irq_replay:
 cmpwi cr0,r3,0x500
 bne 1f
 addi r3,r1,STACK_FRAME_OVERHEAD;
-bl .do_IRQ
-b .ret_from_except
+bl do_IRQ
+b ret_from_except
 1: cmpwi cr0,r3,0x900
 bne 1f
 addi r3,r1,STACK_FRAME_OVERHEAD;
-bl .timer_interrupt
-b .ret_from_except
+bl timer_interrupt
+b ret_from_except
 #ifdef CONFIG_PPC_DOORBELL
 1:
 #ifdef CONFIG_PPC_BOOK3E
@@ -948,14 +948,14 @@ restore_check_irq_replay:
 #endif /* CONFIG_PPC_BOOK3E */
 bne 1f
 addi r3,r1,STACK_FRAME_OVERHEAD;
-bl .doorbell_exception
-b .ret_from_except
+bl doorbell_exception
+b ret_from_except
 #endif /* CONFIG_PPC_DOORBELL */
-1: b .ret_from_except /* What else to do here ? */
+1: b ret_from_except /* What else to do here ? */

 unrecov_restore:
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .unrecoverable_exception
+bl unrecoverable_exception
 b unrecov_restore

 #ifdef CONFIG_PPC_RTAS
@@ -1238,7 +1238,7 @@ _GLOBAL(ftrace_graph_caller)
 ld r11, 112(r1)
 addi r3, r11, 16

-bl .prepare_ftrace_return
+bl prepare_ftrace_return
 nop

 ld r0, 128(r1)
@@ -1254,7 +1254,7 @@ _GLOBAL(return_to_handler)
 mr r31, r1
 stdu r1, -112(r1)

-bl .ftrace_return_to_handler
+bl ftrace_return_to_handler
 nop

 /* return value has real return address */
@@ -1284,7 +1284,7 @@ _GLOBAL(mod_return_to_handler)
 */
 ld r2, PACATOC(r13)

-bl .ftrace_return_to_handler
+bl ftrace_return_to_handler
 nop

 /* return value has real return address */
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index c1bee3ce9d1f..5e37338c2e5c 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -499,7 +499,7 @@ exc_##n##_bad_stack: \
 CHECK_NAPPING(); \
 addi r3,r1,STACK_FRAME_OVERHEAD; \
 bl hdlr; \
-b .ret_from_except_lite;
+b ret_from_except_lite;

 /* This value is used to mark exception frames on the stack. */
 .section ".toc","aw"
@@ -550,11 +550,11 @@ interrupt_end_book3e:
 CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
 PROLOG_ADDITION_NONE)
 EXCEPTION_COMMON_CRIT(0x100)
-bl .save_nvgprs
+bl save_nvgprs
 bl special_reg_save
 CHECK_NAPPING();
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .unknown_exception
+bl unknown_exception
 b ret_from_crit_except

 /* Machine Check Interrupt */
@@ -562,11 +562,11 @@ interrupt_end_book3e:
 MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK,
 PROLOG_ADDITION_NONE)
 EXCEPTION_COMMON_MC(0x000)
-bl .save_nvgprs
+bl save_nvgprs
 bl special_reg_save
 CHECK_NAPPING();
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .machine_check_exception
+bl machine_check_exception
 b ret_from_mc_except

 /* Data Storage Interrupt */
@@ -612,9 +612,9 @@ interrupt_end_book3e:
 std r14,_DSISR(r1)
 addi r3,r1,STACK_FRAME_OVERHEAD
 ld r14,PACA_EXGEN+EX_R14(r13)
-bl .save_nvgprs
-bl .program_check_exception
-b .ret_from_except
+bl save_nvgprs
+bl program_check_exception
+b ret_from_except

 /* Floating Point Unavailable Interrupt */
 START_EXCEPTION(fp_unavailable);
@@ -625,13 +625,13 @@ interrupt_end_book3e:
 ld r12,_MSR(r1)
 andi. r0,r12,MSR_PR;
 beq- 1f
-bl .load_up_fpu
+bl load_up_fpu
 b fast_exception_return
 1: INTS_DISABLE
-bl .save_nvgprs
+bl save_nvgprs
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .kernel_fp_unavailable_exception
-b .ret_from_except
+bl kernel_fp_unavailable_exception
+b ret_from_except

 /* Altivec Unavailable Interrupt */
 START_EXCEPTION(altivec_unavailable);
@@ -644,16 +644,16 @@ BEGIN_FTR_SECTION
 ld r12,_MSR(r1)
 andi. r0,r12,MSR_PR;
 beq- 1f
-bl .load_up_altivec
+bl load_up_altivec
 b fast_exception_return
 1:
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
 INTS_DISABLE
-bl .save_nvgprs
+bl save_nvgprs
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .altivec_unavailable_exception
-b .ret_from_except
+bl altivec_unavailable_exception
+b ret_from_except

 /* AltiVec Assist */
 START_EXCEPTION(altivec_assist);
@@ -662,16 +662,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 PROLOG_ADDITION_NONE)
 EXCEPTION_COMMON(0x220)
 INTS_DISABLE
-bl .save_nvgprs
+bl save_nvgprs
 addi r3,r1,STACK_FRAME_OVERHEAD
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
-bl .altivec_assist_exception
+bl altivec_assist_exception
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #else
-bl .unknown_exception
+bl unknown_exception
 #endif
-b .ret_from_except
+b ret_from_except


 /* Decrementer Interrupt */
@@ -687,14 +687,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
 PROLOG_ADDITION_NONE)
 EXCEPTION_COMMON_CRIT(0x9f0)
-bl .save_nvgprs
+bl save_nvgprs
 bl special_reg_save
 CHECK_NAPPING();
 addi r3,r1,STACK_FRAME_OVERHEAD
 #ifdef CONFIG_BOOKE_WDT
-bl .WatchdogException
+bl WatchdogException
 #else
-bl .unknown_exception
+bl unknown_exception
 #endif
 b ret_from_crit_except

@@ -712,10 +712,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 PROLOG_ADDITION_NONE)
 EXCEPTION_COMMON(0xf20)
 INTS_DISABLE
-bl .save_nvgprs
+bl save_nvgprs
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .unknown_exception
-b .ret_from_except
+bl unknown_exception
+b ret_from_except

 /* Debug exception as a critical interrupt*/
 START_EXCEPTION(debug_crit);
@@ -774,9 +774,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 mr r4,r14
 ld r14,PACA_EXCRIT+EX_R14(r13)
 ld r15,PACA_EXCRIT+EX_R15(r13)
-bl .save_nvgprs
-bl .DebugException
-b .ret_from_except
+bl save_nvgprs
+bl DebugException
+b ret_from_except

 kernel_dbg_exc:
 b . /* NYI */
@@ -839,9 +839,9 @@ kernel_dbg_exc:
 mr r4,r14
 ld r14,PACA_EXDBG+EX_R14(r13)
 ld r15,PACA_EXDBG+EX_R15(r13)
-bl .save_nvgprs
-bl .DebugException
-b .ret_from_except
+bl save_nvgprs
+bl DebugException
+b ret_from_except

 START_EXCEPTION(perfmon);
 NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
@@ -850,8 +850,8 @@ kernel_dbg_exc:
 INTS_DISABLE
 CHECK_NAPPING()
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .performance_monitor_exception
-b .ret_from_except_lite
+bl performance_monitor_exception
+b ret_from_except_lite

 /* Doorbell interrupt */
 MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
@@ -862,11 +862,11 @@ kernel_dbg_exc:
 CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
 PROLOG_ADDITION_NONE)
 EXCEPTION_COMMON_CRIT(0x2a0)
-bl .save_nvgprs
+bl save_nvgprs
 bl special_reg_save
 CHECK_NAPPING();
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .unknown_exception
+bl unknown_exception
 b ret_from_crit_except

 /*
@@ -878,21 +878,21 @@ kernel_dbg_exc:
 PROLOG_ADDITION_NONE)
 EXCEPTION_COMMON(0x2c0)
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .save_nvgprs
+bl save_nvgprs
 INTS_RESTORE_HARD
-bl .unknown_exception
-b .ret_from_except
+bl unknown_exception
+b ret_from_except

 /* Guest Doorbell critical Interrupt */
 START_EXCEPTION(guest_doorbell_crit);
 CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
 PROLOG_ADDITION_NONE)
 EXCEPTION_COMMON_CRIT(0x2e0)
-bl .save_nvgprs
+bl save_nvgprs
 bl special_reg_save
 CHECK_NAPPING();
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .unknown_exception
+bl unknown_exception
 b ret_from_crit_except

 /* Hypervisor call */
@@ -901,10 +901,10 @@ kernel_dbg_exc:
 PROLOG_ADDITION_NONE)
 EXCEPTION_COMMON(0x310)
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .save_nvgprs
+bl save_nvgprs
 INTS_RESTORE_HARD
-bl .unknown_exception
-b .ret_from_except
+bl unknown_exception
+b ret_from_except

 /* Embedded Hypervisor priviledged */
 START_EXCEPTION(ehpriv);
@@ -912,10 +912,10 @@ kernel_dbg_exc:
 PROLOG_ADDITION_NONE)
 EXCEPTION_COMMON(0x320)
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .save_nvgprs
+bl save_nvgprs
 INTS_RESTORE_HARD
-bl .unknown_exception
-b .ret_from_except
+bl unknown_exception
+b ret_from_except

 /* LRAT Error interrupt */
 START_EXCEPTION(lrat_error);
@@ -1014,16 +1014,16 @@ storage_fault_common:
 mr r5,r15
 ld r14,PACA_EXGEN+EX_R14(r13)
 ld r15,PACA_EXGEN+EX_R15(r13)
-bl .do_page_fault
+bl do_page_fault
 cmpdi r3,0
 bne- 1f
-b .ret_from_except_lite
-1: bl .save_nvgprs
+b ret_from_except_lite
+1: bl save_nvgprs
 mr r5,r3
 addi r3,r1,STACK_FRAME_OVERHEAD
 ld r4,_DAR(r1)
-bl .bad_page_fault
-b .ret_from_except
+bl bad_page_fault
+b ret_from_except

 /*
 * Alignment exception doesn't fit entirely in the 0x100 bytes so it
@@ -1035,10 +1035,10 @@ alignment_more:
 addi r3,r1,STACK_FRAME_OVERHEAD
 ld r14,PACA_EXGEN+EX_R14(r13)
 ld r15,PACA_EXGEN+EX_R15(r13)
-bl .save_nvgprs
+bl save_nvgprs
 INTS_RESTORE_HARD
-bl .alignment_exception
-b .ret_from_except
+bl alignment_exception
+b ret_from_except

 /*
 * We branch here from entry_64.S for the last stage of the exception
@@ -1172,7 +1172,7 @@ bad_stack_book3e:
 std r12,0(r11)
 ld r2,PACATOC(r13)
 1: addi r3,r1,STACK_FRAME_OVERHEAD
-bl .kernel_bad_stack
+bl kernel_bad_stack
 b 1b

 /*
@@ -1521,13 +1521,13 @@ _GLOBAL(start_initialization_book3e)
 * and always use AS 0, so we just set it up to match our link
 * address and never use 0 based addresses.
 */
-bl .initial_tlb_book3e
+bl initial_tlb_book3e

 /* Init global core bits */
-bl .init_core_book3e
+bl init_core_book3e

 /* Init per-thread bits */
-bl .init_thread_book3e
+bl init_thread_book3e

 /* Return to common init code */
 tovirt(r28,r28)
@@ -1548,7 +1548,7 @@ _GLOBAL(start_initialization_book3e)
 */
 _GLOBAL(book3e_secondary_core_init_tlb_set)
 li r4,1
-b .generic_secondary_smp_init
+b generic_secondary_smp_init

 _GLOBAL(book3e_secondary_core_init)
 mflr r28
@@ -1558,18 +1558,18 @@ _GLOBAL(book3e_secondary_core_init)
 bne 2f

 /* Setup TLB for this core */
-bl .initial_tlb_book3e
+bl initial_tlb_book3e

 /* We can return from the above running at a different
 * address, so recalculate r2 (TOC)
 */
-bl .relative_toc
+bl relative_toc

 /* Init global core bits */
-2: bl .init_core_book3e
+2: bl init_core_book3e

 /* Init per-thread bits */
-3: bl .init_thread_book3e
+3: bl init_thread_book3e

 /* Return to common init code at proper virtual address.
 *
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 3afd3915921a..28391e048120 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -132,12 +132,12 @@ BEGIN_FTR_SECTION
 #endif

 beq cr1,2f
-b .power7_wakeup_noloss
-2: b .power7_wakeup_loss
+b power7_wakeup_noloss
+2: b power7_wakeup_loss

 /* Fast Sleep wakeup on PowerNV */
 8: GET_PACA(r13)
-b .power7_wakeup_tb_loss
+b power7_wakeup_tb_loss

 9:
 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
@@ -211,7 +211,7 @@ data_access_slb_pSeries:
 #endif /* __DISABLED__ */
 mfspr r12,SPRN_SRR1
 #ifndef CONFIG_RELOCATABLE
-b .slb_miss_realmode
+b slb_miss_realmode
 #else
 /*
 * We can't just use a direct branch to .slb_miss_realmode
@@ -243,7 +243,7 @@ instruction_access_slb_pSeries:
 #endif /* __DISABLED__ */
 mfspr r12,SPRN_SRR1
 #ifndef CONFIG_RELOCATABLE
-b .slb_miss_realmode
+b slb_miss_realmode
 #else
 mfctr r11
 ld r10,PACAKBASE(r13)
@@ -829,7 +829,7 @@ data_access_slb_relon_pSeries:
 mfspr r3,SPRN_DAR
 mfspr r12,SPRN_SRR1
 #ifndef CONFIG_RELOCATABLE
-b .slb_miss_realmode
+b slb_miss_realmode
 #else
 /*
 * We can't just use a direct branch to .slb_miss_realmode
@@ -854,7 +854,7 @@ instruction_access_slb_relon_pSeries:
 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
 mfspr r12,SPRN_SRR1
 #ifndef CONFIG_RELOCATABLE
-b .slb_miss_realmode
+b slb_miss_realmode
 #else
 mfctr r11
 ld r10,PACAKBASE(r13)
@@ -966,7 +966,7 @@ system_call_entry:
 b system_call_common

 ppc64_runlatch_on_trampoline:
-b .__ppc64_runlatch_on
+b __ppc64_runlatch_on

 /*
 * Here we have detected that the kernel stack pointer is bad.
@@ -1025,7 +1025,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 std r12,RESULT(r1)
 std r11,STACK_FRAME_OVERHEAD-16(r1)
 1: addi r3,r1,STACK_FRAME_OVERHEAD
-bl .kernel_bad_stack
+bl kernel_bad_stack
 b 1b

 /*
@@ -1046,7 +1046,7 @@ data_access_common:
 ld r3,PACA_EXGEN+EX_DAR(r13)
 lwz r4,PACA_EXGEN+EX_DSISR(r13)
 li r5,0x300
-b .do_hash_page /* Try to handle as hpte fault */
+b do_hash_page /* Try to handle as hpte fault */

 .align 7
 .globl h_data_storage_common
@@ -1056,11 +1056,11 @@ h_data_storage_common:
 mfspr r10,SPRN_HDSISR
 stw r10,PACA_EXGEN+EX_DSISR(r13)
 EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
-bl .save_nvgprs
+bl save_nvgprs
 DISABLE_INTS
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .unknown_exception
-b .ret_from_except
+bl unknown_exception
+b ret_from_except

 .align 7
 .globl instruction_access_common
@@ -1071,7 +1071,7 @@ instruction_access_common:
 ld r3,_NIP(r1)
 andis. r4,r12,0x5820
 li r5,0x400
-b .do_hash_page /* Try to handle as hpte fault */
+b do_hash_page /* Try to handle as hpte fault */

 STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)

@@ -1088,7 +1088,7 @@ slb_miss_user_common:
 stw r9,PACA_EXGEN+EX_CCR(r13)
 std r10,PACA_EXGEN+EX_LR(r13)
 std r11,PACA_EXGEN+EX_SRR0(r13)
-bl .slb_allocate_user
+bl slb_allocate_user

 ld r10,PACA_EXGEN+EX_LR(r13)
 ld r3,PACA_EXGEN+EX_R3(r13)
@@ -1131,9 +1131,9 @@ slb_miss_fault:
 unrecov_user_slb:
 EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
 DISABLE_INTS
-bl .save_nvgprs
+bl save_nvgprs
 1: addi r3,r1,STACK_FRAME_OVERHEAD
-bl .unrecoverable_exception
+bl unrecoverable_exception
 b 1b

 #endif /* __DISABLED__ */
@@ -1158,10 +1158,10 @@ machine_check_common:
 lwz r4,PACA_EXGEN+EX_DSISR(r13)
 std r3,_DAR(r1)
 std r4,_DSISR(r1)
-bl .save_nvgprs
+bl save_nvgprs
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .machine_check_exception
-b .ret_from_except
+bl machine_check_exception
+b ret_from_except

 .align 7
 .globl alignment_common
@@ -1175,31 +1175,31 @@ alignment_common:
 lwz r4,PACA_EXGEN+EX_DSISR(r13)
 std r3,_DAR(r1)
 std r4,_DSISR(r1)
-bl .save_nvgprs
+bl save_nvgprs
 DISABLE_INTS
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .alignment_exception
-b .ret_from_except
+bl alignment_exception
+b ret_from_except

 .align 7
 .globl program_check_common
 program_check_common:
 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
-bl .save_nvgprs
+bl save_nvgprs
 DISABLE_INTS
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .program_check_exception
-b .ret_from_except
+bl program_check_exception
+b ret_from_except

 .align 7
 .globl fp_unavailable_common
 fp_unavailable_common:
 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
 bne 1f /* if from user, just load it up */
-bl .save_nvgprs
+bl save_nvgprs
 DISABLE_INTS
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .kernel_fp_unavailable_exception
+bl kernel_fp_unavailable_exception
 BUG_OPCODE
 1:
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -1211,15 +1211,15 @@ BEGIN_FTR_SECTION
 bne- 2f
 END_FTR_SECTION_IFSET(CPU_FTR_TM)
 #endif
-bl .load_up_fpu
+bl load_up_fpu
 b fast_exception_return
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 2: /* User process was in a transaction */
-bl .save_nvgprs
+bl save_nvgprs
 DISABLE_INTS
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .fp_unavailable_tm
-b .ret_from_except
+bl fp_unavailable_tm
+b ret_from_except
 #endif
 .align 7
 .globl altivec_unavailable_common
@@ -1237,24 +1237,24 @@ BEGIN_FTR_SECTION
 bne- 2f
 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
 #endif
-bl .load_up_altivec
+bl load_up_altivec
 b fast_exception_return
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 2: /* User process was in a transaction */
-bl .save_nvgprs
+bl save_nvgprs
 DISABLE_INTS
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .altivec_unavailable_tm
-b .ret_from_except
+bl altivec_unavailable_tm
+b ret_from_except
 #endif
 1:
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
-bl .save_nvgprs
+bl save_nvgprs
 DISABLE_INTS
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .altivec_unavailable_exception
-b .ret_from_except
+bl altivec_unavailable_exception
+b ret_from_except

 .align 7
 .globl vsx_unavailable_common
@@ -1272,23 +1272,23 @@ BEGIN_FTR_SECTION
 bne- 2f
 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
 #endif
-b .load_up_vsx
+b load_up_vsx
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 2: /* User process was in a transaction */
-bl .save_nvgprs
+bl save_nvgprs
 DISABLE_INTS
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .vsx_unavailable_tm
-b .ret_from_except
+bl vsx_unavailable_tm
+b ret_from_except
 #endif
 1:
 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 #endif
-bl .save_nvgprs
+bl save_nvgprs
 DISABLE_INTS
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .vsx_unavailable_exception
-b .ret_from_except
+bl vsx_unavailable_exception
+b ret_from_except

 STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
 STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
@@ -1386,9 +1386,9 @@ _GLOBAL(opal_mc_secondary_handler)
 machine_check_handle_early:
 std r0,GPR0(r1) /* Save r0 */
 EXCEPTION_PROLOG_COMMON_3(0x200)
-bl .save_nvgprs
+bl save_nvgprs
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .machine_check_early
+bl machine_check_early
 ld r12,_MSR(r1)
 #ifdef CONFIG_PPC_P7_NAP
 /*
@@ -1408,11 +1408,11 @@ machine_check_handle_early:
 /* Supervisor state loss */
 li r0,1
 stb r0,PACA_NAPSTATELOST(r13)
-3: bl .machine_check_queue_event
+3: bl machine_check_queue_event
 MACHINE_CHECK_HANDLER_WINDUP
 GET_PACA(r13)
 ld r1,PACAR1(r13)
-b .power7_enter_nap_mode
+b power7_enter_nap_mode
 4:
 #endif
 /*
@@ -1444,7 +1444,7 @@ machine_check_handle_early:
 andi. r11,r12,MSR_RI
 bne 2f
 1: addi r3,r1,STACK_FRAME_OVERHEAD
-bl .unrecoverable_exception
+bl unrecoverable_exception
 b 1b
 2:
 /*
@@ -1452,7 +1452,7 @@ machine_check_handle_early:
 * Queue up the MCE event so that we can log it later, while
 * returning from kernel or opal call.
 */
-bl .machine_check_queue_event
+bl machine_check_queue_event
 MACHINE_CHECK_HANDLER_WINDUP
 rfid
 9:
@@ -1477,7 +1477,7 @@ _GLOBAL(slb_miss_realmode)
 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */

-bl .slb_allocate_realmode
+bl slb_allocate_realmode

 /* All done -- return from exception. */

@@ -1517,9 +1517,9 @@ _GLOBAL(slb_miss_realmode)
 unrecov_slb:
 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
 DISABLE_INTS
-bl .save_nvgprs
+bl save_nvgprs
 1: addi r3,r1,STACK_FRAME_OVERHEAD
-bl .unrecoverable_exception
+bl unrecoverable_exception
 b 1b


@@ -1573,7 +1573,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
 *
 * at return r3 = 0 for success, 1 for page fault, negative for error
 */
-bl .hash_page /* build HPTE if possible */
+bl hash_page /* build HPTE if possible */
 cmpdi r3,0 /* see if hash_page succeeded */

 /* Success */
@@ -1587,35 +1587,35 @@ handle_page_fault:
 11: ld r4,_DAR(r1)
 ld r5,_DSISR(r1)
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .do_page_fault
+bl do_page_fault
 cmpdi r3,0
 beq+ 12f
-bl .save_nvgprs
+bl save_nvgprs
 mr r5,r3
 addi r3,r1,STACK_FRAME_OVERHEAD
 lwz r4,_DAR(r1)
-bl .bad_page_fault
-b .ret_from_except
+bl bad_page_fault
+b ret_from_except

 /* We have a data breakpoint exception - handle it */
 handle_dabr_fault:
-bl .save_nvgprs
+bl save_nvgprs
 ld r4,_DAR(r1)
 ld r5,_DSISR(r1)
 addi r3,r1,STACK_FRAME_OVERHEAD
-bl .do_break
-12: b .ret_from_except_lite
+bl do_break
+12: b ret_from_except_lite


 /* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
-13: bl .save_nvgprs
+13: bl save_nvgprs
 mr r5,r3
 addi r3,r1,STACK_FRAME_OVERHEAD
 ld r4,_DAR(r1)
-bl .low_hash_fault
-b .ret_from_except
+bl low_hash_fault
+b ret_from_except

 /*
 * We come here as a result of a DSI at a point where we don't want
@@ -1624,16 +1624,16 @@ handle_dabr_fault:
 * were soft-disabled. We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
-77: bl .save_nvgprs
+77: bl save_nvgprs
 mr r4,r3
 addi r3,r1,STACK_FRAME_OVERHEAD
 li r5,SIGSEGV
-bl .bad_page_fault
-b .ret_from_except
+bl bad_page_fault
+b ret_from_except

 /* here we have a segment miss */
 do_ste_alloc:
-bl .ste_allocate /* try to insert stab entry */
+bl ste_allocate /* try to insert stab entry */
 cmpdi r3,0
 bne- handle_page_fault
 b fast_exception_return
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index b7363bd42452..afcfd631bf7f 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S | |||
@@ -70,7 +70,7 @@ _GLOBAL(__start) | |||
70 | /* NOP this out unconditionally */ | 70 | /* NOP this out unconditionally */ |
71 | BEGIN_FTR_SECTION | 71 | BEGIN_FTR_SECTION |
72 | FIXUP_ENDIAN | 72 | FIXUP_ENDIAN |
73 | b .__start_initialization_multiplatform | 73 | b __start_initialization_multiplatform |
74 | END_FTR_SECTION(0, 1) | 74 | END_FTR_SECTION(0, 1) |
75 | 75 | ||
76 | /* Catch branch to 0 in real mode */ | 76 | /* Catch branch to 0 in real mode */ |
@@ -186,16 +186,16 @@ _GLOBAL(generic_secondary_thread_init) | |||
186 | mr r24,r3 | 186 | mr r24,r3 |
187 | 187 | ||
188 | /* turn on 64-bit mode */ | 188 | /* turn on 64-bit mode */ |
189 | bl .enable_64b_mode | 189 | bl enable_64b_mode |
190 | 190 | ||
191 | /* get a valid TOC pointer, wherever we're mapped at */ | 191 | /* get a valid TOC pointer, wherever we're mapped at */ |
192 | bl .relative_toc | 192 | bl relative_toc |
193 | tovirt(r2,r2) | 193 | tovirt(r2,r2) |
194 | 194 | ||
195 | #ifdef CONFIG_PPC_BOOK3E | 195 | #ifdef CONFIG_PPC_BOOK3E |
196 | /* Book3E initialization */ | 196 | /* Book3E initialization */ |
197 | mr r3,r24 | 197 | mr r3,r24 |
198 | bl .book3e_secondary_thread_init | 198 | bl book3e_secondary_thread_init |
199 | #endif | 199 | #endif |
200 | b generic_secondary_common_init | 200 | b generic_secondary_common_init |
201 | 201 | ||
@@ -214,17 +214,17 @@ _GLOBAL(generic_secondary_smp_init) | |||
214 | mr r25,r4 | 214 | mr r25,r4 |
215 | 215 | ||
216 | /* turn on 64-bit mode */ | 216 | /* turn on 64-bit mode */ |
217 | bl .enable_64b_mode | 217 | bl enable_64b_mode |
218 | 218 | ||
219 | /* get a valid TOC pointer, wherever we're mapped at */ | 219 | /* get a valid TOC pointer, wherever we're mapped at */ |
220 | bl .relative_toc | 220 | bl relative_toc |
221 | tovirt(r2,r2) | 221 | tovirt(r2,r2) |
222 | 222 | ||
223 | #ifdef CONFIG_PPC_BOOK3E | 223 | #ifdef CONFIG_PPC_BOOK3E |
224 | /* Book3E initialization */ | 224 | /* Book3E initialization */ |
225 | mr r3,r24 | 225 | mr r3,r24 |
226 | mr r4,r25 | 226 | mr r4,r25 |
227 | bl .book3e_secondary_core_init | 227 | bl book3e_secondary_core_init |
228 | #endif | 228 | #endif |
229 | 229 | ||
230 | generic_secondary_common_init: | 230 | generic_secondary_common_init: |
@@ -236,7 +236,7 @@ generic_secondary_common_init: | |||
236 | ld r13,0(r13) /* Get base vaddr of paca array */ | 236 | ld r13,0(r13) /* Get base vaddr of paca array */ |
237 | #ifndef CONFIG_SMP | 237 | #ifndef CONFIG_SMP |
238 | addi r13,r13,PACA_SIZE /* know r13 if used accidentally */ | 238 | addi r13,r13,PACA_SIZE /* know r13 if used accidentally */ |
239 | b .kexec_wait /* wait for next kernel if !SMP */ | 239 | b kexec_wait /* wait for next kernel if !SMP */ |
240 | #else | 240 | #else |
241 | LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */ | 241 | LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */ |
242 | lwz r7,0(r7) /* also the max paca allocated */ | 242 | lwz r7,0(r7) /* also the max paca allocated */ |
@@ -250,7 +250,7 @@ generic_secondary_common_init: | |||
250 | blt 1b | 250 | blt 1b |
251 | 251 | ||
252 | mr r3,r24 /* not found, copy phys to r3 */ | 252 | mr r3,r24 /* not found, copy phys to r3 */ |
253 | b .kexec_wait /* next kernel might do better */ | 253 | b kexec_wait /* next kernel might do better */ |
254 | 254 | ||
255 | 2: SET_PACA(r13) | 255 | 2: SET_PACA(r13) |
256 | #ifdef CONFIG_PPC_BOOK3E | 256 | #ifdef CONFIG_PPC_BOOK3E |
@@ -326,10 +326,10 @@ _STATIC(__mmu_off) | |||
326 | */ | 326 | */ |
327 | _GLOBAL(__start_initialization_multiplatform) | 327 | _GLOBAL(__start_initialization_multiplatform) |
328 | /* Make sure we are running in 64 bits mode */ | 328 | /* Make sure we are running in 64 bits mode */ |
329 | bl .enable_64b_mode | 329 | bl enable_64b_mode |
330 | 330 | ||
331 | /* Get TOC pointer (current runtime address) */ | 331 | /* Get TOC pointer (current runtime address) */ |
332 | bl .relative_toc | 332 | bl relative_toc |
333 | 333 | ||
334 | /* find out where we are now */ | 334 | /* find out where we are now */ |
335 | bcl 20,31,$+4 | 335 | bcl 20,31,$+4 |
@@ -342,7 +342,7 @@ _GLOBAL(__start_initialization_multiplatform) | |||
342 | */ | 342 | */ |
343 | cmpldi cr0,r5,0 | 343 | cmpldi cr0,r5,0 |
344 | beq 1f | 344 | beq 1f |
345 | b .__boot_from_prom /* yes -> prom */ | 345 | b __boot_from_prom /* yes -> prom */ |
346 | 1: | 346 | 1: |
347 | /* Save parameters */ | 347 | /* Save parameters */ |
348 | mr r31,r3 | 348 | mr r31,r3 |
@@ -354,8 +354,8 @@ _GLOBAL(__start_initialization_multiplatform) | |||
354 | #endif | 354 | #endif |
355 | 355 | ||
356 | #ifdef CONFIG_PPC_BOOK3E | 356 | #ifdef CONFIG_PPC_BOOK3E |
357 | bl .start_initialization_book3e | 357 | bl start_initialization_book3e |
358 | b .__after_prom_start | 358 | b __after_prom_start |
359 | #else | 359 | #else |
360 | /* Setup some critical 970 SPRs before switching MMU off */ | 360 | /* Setup some critical 970 SPRs before switching MMU off */ |
361 | mfspr r0,SPRN_PVR | 361 | mfspr r0,SPRN_PVR |
@@ -368,12 +368,12 @@ _GLOBAL(__start_initialization_multiplatform) | |||
368 | beq 1f | 368 | beq 1f |
369 | cmpwi r0,0x45 /* 970GX */ | 369 | cmpwi r0,0x45 /* 970GX */ |
370 | bne 2f | 370 | bne 2f |
371 | 1: bl .__cpu_preinit_ppc970 | 371 | 1: bl __cpu_preinit_ppc970 |
372 | 2: | 372 | 2: |
373 | 373 | ||
374 | /* Switch off MMU if not already off */ | 374 | /* Switch off MMU if not already off */ |
375 | bl .__mmu_off | 375 | bl __mmu_off |
376 | b .__after_prom_start | 376 | b __after_prom_start |
377 | #endif /* CONFIG_PPC_BOOK3E */ | 377 | #endif /* CONFIG_PPC_BOOK3E */ |
378 | 378 | ||
379 | _INIT_STATIC(__boot_from_prom) | 379 | _INIT_STATIC(__boot_from_prom) |
@@ -395,7 +395,7 @@ _INIT_STATIC(__boot_from_prom) | |||
395 | #ifdef CONFIG_RELOCATABLE | 395 | #ifdef CONFIG_RELOCATABLE |
396 | /* Relocate code for where we are now */ | 396 | /* Relocate code for where we are now */ |
397 | mr r3,r26 | 397 | mr r3,r26 |
398 | bl .relocate | 398 | bl relocate |
399 | #endif | 399 | #endif |
400 | 400 | ||
401 | /* Restore parameters */ | 401 | /* Restore parameters */ |
@@ -407,7 +407,7 @@ _INIT_STATIC(__boot_from_prom) | |||
407 | 407 | ||
408 | /* Do all of the interaction with OF client interface */ | 408 | /* Do all of the interaction with OF client interface */ |
409 | mr r8,r26 | 409 | mr r8,r26 |
410 | bl .prom_init | 410 | bl prom_init |
411 | #endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */ | 411 | #endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */ |
412 | 412 | ||
413 | /* We never return. We also hit that trap if trying to boot | 413 | /* We never return. We also hit that trap if trying to boot |
@@ -424,7 +424,7 @@ _STATIC(__after_prom_start) | |||
424 | bne 1f | 424 | bne 1f |
425 | add r25,r25,r26 | 425 | add r25,r25,r26 |
426 | 1: mr r3,r25 | 426 | 1: mr r3,r25 |
427 | bl .relocate | 427 | bl relocate |
428 | #endif | 428 | #endif |
429 | 429 | ||
430 | /* | 430 | /* |
@@ -464,7 +464,7 @@ _STATIC(__after_prom_start) | |||
464 | lis r5,(copy_to_here - _stext)@ha | 464 | lis r5,(copy_to_here - _stext)@ha |
465 | addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */ | 465 | addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */ |
466 | 466 | ||
467 | bl .copy_and_flush /* copy the first n bytes */ | 467 | bl copy_and_flush /* copy the first n bytes */ |
468 | /* this includes the code being */ | 468 | /* this includes the code being */ |
469 | /* executed here. */ | 469 | /* executed here. */ |
470 | addis r8,r3,(4f - _stext)@ha /* Jump to the copy of this code */ | 470 | addis r8,r3,(4f - _stext)@ha /* Jump to the copy of this code */ |
@@ -478,9 +478,9 @@ p_end: .llong _end - _stext | |||
478 | 4: /* Now copy the rest of the kernel up to _end */ | 478 | 4: /* Now copy the rest of the kernel up to _end */ |
479 | addis r5,r26,(p_end - _stext)@ha | 479 | addis r5,r26,(p_end - _stext)@ha |
480 | ld r5,(p_end - _stext)@l(r5) /* get _end */ | 480 | ld r5,(p_end - _stext)@l(r5) /* get _end */ |
481 | 5: bl .copy_and_flush /* copy the rest */ | 481 | 5: bl copy_and_flush /* copy the rest */ |
482 | 482 | ||
483 | 9: b .start_here_multiplatform | 483 | 9: b start_here_multiplatform |
484 | 484 | ||
485 | /* | 485 | /* |
486 | * Copy routine used to copy the kernel to start at physical address 0 | 486 | * Copy routine used to copy the kernel to start at physical address 0 |
@@ -544,7 +544,7 @@ __secondary_start_pmac_0: | |||
544 | 544 | ||
545 | _GLOBAL(pmac_secondary_start) | 545 | _GLOBAL(pmac_secondary_start) |
546 | /* turn on 64-bit mode */ | 546 | /* turn on 64-bit mode */ |
547 | bl .enable_64b_mode | 547 | bl enable_64b_mode |
548 | 548 | ||
549 | li r0,0 | 549 | li r0,0 |
550 | mfspr r3,SPRN_HID4 | 550 | mfspr r3,SPRN_HID4 |
@@ -556,11 +556,11 @@ _GLOBAL(pmac_secondary_start) | |||
556 | slbia | 556 | slbia |
557 | 557 | ||
558 | /* get TOC pointer (real address) */ | 558 | /* get TOC pointer (real address) */ |
559 | bl .relative_toc | 559 | bl relative_toc |
560 | tovirt(r2,r2) | 560 | tovirt(r2,r2) |
561 | 561 | ||
562 | /* Copy some CPU settings from CPU 0 */ | 562 | /* Copy some CPU settings from CPU 0 */ |
563 | bl .__restore_cpu_ppc970 | 563 | bl __restore_cpu_ppc970 |
564 | 564 | ||
565 | /* pSeries do that early though I don't think we really need it */ | 565 | /* pSeries do that early though I don't think we really need it */ |
566 | mfmsr r3 | 566 | mfmsr r3 |
@@ -619,7 +619,7 @@ __secondary_start: | |||
619 | std r14,PACAKSAVE(r13) | 619 | std r14,PACAKSAVE(r13) |
620 | 620 | ||
621 | /* Do early setup for that CPU (stab, slb, hash table pointer) */ | 621 | /* Do early setup for that CPU (stab, slb, hash table pointer) */ |
622 | bl .early_setup_secondary | 622 | bl early_setup_secondary |
623 | 623 | ||
624 | /* | 624 | /* |
625 | * setup the new stack pointer, but *don't* use this until | 625 | * setup the new stack pointer, but *don't* use this until |
@@ -656,7 +656,7 @@ _GLOBAL(start_secondary_prolog) | |||
656 | ld r2,PACATOC(r13) | 656 | ld r2,PACATOC(r13) |
657 | li r3,0 | 657 | li r3,0 |
658 | std r3,0(r1) /* Zero the stack frame pointer */ | 658 | std r3,0(r1) /* Zero the stack frame pointer */ |
659 | bl .start_secondary | 659 | bl start_secondary |
660 | b . | 660 | b . |
661 | /* | 661 | /* |
662 | * Reset stack pointer and call start_secondary | 662 | * Reset stack pointer and call start_secondary |
@@ -667,7 +667,7 @@ _GLOBAL(start_secondary_resume) | |||
667 | ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */ | 667 | ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */ |
668 | li r3,0 | 668 | li r3,0 |
669 | std r3,0(r1) /* Zero the stack frame pointer */ | 669 | std r3,0(r1) /* Zero the stack frame pointer */ |
670 | bl .start_secondary | 670 | bl start_secondary |
671 | b . | 671 | b . |
672 | #endif | 672 | #endif |
673 | 673 | ||
@@ -717,7 +717,7 @@ p_toc: .llong __toc_start + 0x8000 - 0b | |||
717 | */ | 717 | */ |
718 | _INIT_STATIC(start_here_multiplatform) | 718 | _INIT_STATIC(start_here_multiplatform) |
719 | /* set up the TOC */ | 719 | /* set up the TOC */ |
720 | bl .relative_toc | 720 | bl relative_toc |
721 | tovirt(r2,r2) | 721 | tovirt(r2,r2) |
722 | 722 | ||
723 | /* Clear out the BSS. It may have been done in prom_init, | 723 | /* Clear out the BSS. It may have been done in prom_init, |
@@ -776,7 +776,7 @@ _INIT_STATIC(start_here_multiplatform) | |||
776 | 776 | ||
777 | /* Restore parameters passed from prom_init/kexec */ | 777 | /* Restore parameters passed from prom_init/kexec */ |
778 | mr r3,r31 | 778 | mr r3,r31 |
779 | bl .early_setup /* also sets r13 and SPRG_PACA */ | 779 | bl early_setup /* also sets r13 and SPRG_PACA */ |
780 | 780 | ||
781 | LOAD_REG_ADDR(r3, .start_here_common) | 781 | LOAD_REG_ADDR(r3, .start_here_common) |
782 | ld r4,PACAKMSR(r13) | 782 | ld r4,PACAKMSR(r13) |
@@ -794,7 +794,7 @@ _INIT_GLOBAL(start_here_common) | |||
794 | ld r2,PACATOC(r13) | 794 | ld r2,PACATOC(r13) |
795 | 795 | ||
796 | /* Do more system initializations in virtual mode */ | 796 | /* Do more system initializations in virtual mode */ |
797 | bl .setup_system | 797 | bl setup_system |
798 | 798 | ||
799 | /* Mark interrupts soft and hard disabled (they might be enabled | 799 | /* Mark interrupts soft and hard disabled (they might be enabled |
800 | * in the PACA when doing hotplug) | 800 | * in the PACA when doing hotplug) |
@@ -805,7 +805,7 @@ _INIT_GLOBAL(start_here_common) | |||
805 | stb r0,PACAIRQHAPPENED(r13) | 805 | stb r0,PACAIRQHAPPENED(r13) |
806 | 806 | ||
807 | /* Generic kernel entry */ | 807 | /* Generic kernel entry */ |
808 | bl .start_kernel | 808 | bl start_kernel |
809 | 809 | ||
810 | /* Not reached */ | 810 | /* Not reached */ |
811 | BUG_OPCODE | 811 | BUG_OPCODE |
diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S index bfb73cc209ce..48c21acef915 100644 --- a/arch/powerpc/kernel/idle_book3e.S +++ b/arch/powerpc/kernel/idle_book3e.S | |||
@@ -43,7 +43,7 @@ _GLOBAL(\name) | |||
43 | */ | 43 | */ |
44 | #ifdef CONFIG_TRACE_IRQFLAGS | 44 | #ifdef CONFIG_TRACE_IRQFLAGS |
45 | stdu r1,-128(r1) | 45 | stdu r1,-128(r1) |
46 | bl .trace_hardirqs_on | 46 | bl trace_hardirqs_on |
47 | addi r1,r1,128 | 47 | addi r1,r1,128 |
48 | #endif | 48 | #endif |
49 | li r0,1 | 49 | li r0,1 |
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index e3edaa189911..f57a19348bdd 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ b/arch/powerpc/kernel/idle_power4.S | |||
@@ -46,7 +46,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP) | |||
46 | mflr r0 | 46 | mflr r0 |
47 | std r0,16(r1) | 47 | std r0,16(r1) |
48 | stdu r1,-128(r1) | 48 | stdu r1,-128(r1) |
49 | bl .trace_hardirqs_on | 49 | bl trace_hardirqs_on |
50 | addi r1,r1,128 | 50 | addi r1,r1,128 |
51 | ld r0,16(r1) | 51 | ld r0,16(r1) |
52 | mtlr r0 | 52 | mtlr r0 |
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index c3ab86975614..dca6e16c2436 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S | |||
@@ -58,7 +58,7 @@ _GLOBAL(power7_powersave_common) | |||
58 | /* Make sure FPU, VSX etc... are flushed as we may lose | 58 | /* Make sure FPU, VSX etc... are flushed as we may lose |
59 | * state when going to nap mode | 59 | * state when going to nap mode |
60 | */ | 60 | */ |
61 | bl .discard_lazy_cpu_state | 61 | bl discard_lazy_cpu_state |
62 | #endif /* CONFIG_SMP */ | 62 | #endif /* CONFIG_SMP */ |
63 | 63 | ||
64 | /* Hard disable interrupts */ | 64 | /* Hard disable interrupts */ |
@@ -168,7 +168,7 @@ _GLOBAL(power7_wakeup_loss) | |||
168 | _GLOBAL(power7_wakeup_noloss) | 168 | _GLOBAL(power7_wakeup_noloss) |
169 | lbz r0,PACA_NAPSTATELOST(r13) | 169 | lbz r0,PACA_NAPSTATELOST(r13) |
170 | cmpwi r0,0 | 170 | cmpwi r0,0 |
171 | bne .power7_wakeup_loss | 171 | bne power7_wakeup_loss |
172 | ld r1,PACAR1(r13) | 172 | ld r1,PACAR1(r13) |
173 | ld r4,_MSR(r1) | 173 | ld r4,_MSR(r1) |
174 | ld r5,_NIP(r1) | 174 | ld r5,_NIP(r1) |
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 3d0249599d52..b39cf4afad4b 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S | |||
@@ -34,7 +34,7 @@ _GLOBAL(call_do_softirq) | |||
34 | std r0,16(r1) | 34 | std r0,16(r1) |
35 | stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) | 35 | stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) |
36 | mr r1,r3 | 36 | mr r1,r3 |
37 | bl .__do_softirq | 37 | bl __do_softirq |
38 | ld r1,0(r1) | 38 | ld r1,0(r1) |
39 | ld r0,16(r1) | 39 | ld r0,16(r1) |
40 | mtlr r0 | 40 | mtlr r0 |
@@ -45,7 +45,7 @@ _GLOBAL(call_do_irq) | |||
45 | std r0,16(r1) | 45 | std r0,16(r1) |
46 | stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) | 46 | stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) |
47 | mr r1,r4 | 47 | mr r1,r4 |
48 | bl .__do_irq | 48 | bl __do_irq |
49 | ld r1,0(r1) | 49 | ld r1,0(r1) |
50 | ld r0,16(r1) | 50 | ld r0,16(r1) |
51 | mtlr r0 | 51 | mtlr r0 |
@@ -506,7 +506,7 @@ _GLOBAL(kexec_smp_wait) | |||
506 | stb r4,PACAKEXECSTATE(r13) | 506 | stb r4,PACAKEXECSTATE(r13) |
507 | SYNC | 507 | SYNC |
508 | 508 | ||
509 | b .kexec_wait | 509 | b kexec_wait |
510 | 510 | ||
511 | /* | 511 | /* |
512 | * switch to real mode (turn mmu off) | 512 | * switch to real mode (turn mmu off) |
@@ -576,7 +576,7 @@ _GLOBAL(kexec_sequence) | |||
576 | 576 | ||
577 | /* copy dest pages, flush whole dest image */ | 577 | /* copy dest pages, flush whole dest image */ |
578 | mr r3,r29 | 578 | mr r3,r29 |
579 | bl .kexec_copy_flush /* (image) */ | 579 | bl kexec_copy_flush /* (image) */ |
580 | 580 | ||
581 | /* turn off mmu */ | 581 | /* turn off mmu */ |
582 | bl real_mode | 582 | bl real_mode |
@@ -586,7 +586,7 @@ _GLOBAL(kexec_sequence) | |||
586 | mr r4,r30 /* start, aka phys mem offset */ | 586 | mr r4,r30 /* start, aka phys mem offset */ |
587 | li r5,0x100 | 587 | li r5,0x100 |
588 | li r6,0 | 588 | li r6,0 |
589 | bl .copy_and_flush /* (dest, src, copy limit, start offset) */ | 589 | bl copy_and_flush /* (dest, src, copy limit, start offset) */ |
590 | 1: /* assume normal blr return */ | 590 | 1: /* assume normal blr return */ |
591 | 591 | ||
592 | /* release other cpus to the new kernel secondary start at 0x60 */ | 592 | /* release other cpus to the new kernel secondary start at 0x60 */ |
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index e18e3cfc32de..8c86422a1e37 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S | |||
@@ -171,7 +171,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | |||
171 | #endif /* CONFIG_SMP */ | 171 | #endif /* CONFIG_SMP */ |
172 | 172 | ||
173 | /* Jump to partition switch code */ | 173 | /* Jump to partition switch code */ |
174 | bl .kvmppc_hv_entry_trampoline | 174 | bl kvmppc_hv_entry_trampoline |
175 | nop | 175 | nop |
176 | 176 | ||
177 | /* | 177 | /* |
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index ffbb871c2bd8..7cfabe3881d8 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -1647,7 +1647,7 @@ kvmppc_hdsi: | |||
1647 | /* Search the hash table. */ | 1647 | /* Search the hash table. */ |
1648 | mr r3, r9 /* vcpu pointer */ | 1648 | mr r3, r9 /* vcpu pointer */ |
1649 | li r7, 1 /* data fault */ | 1649 | li r7, 1 /* data fault */ |
1650 | bl .kvmppc_hpte_hv_fault | 1650 | bl kvmppc_hpte_hv_fault |
1651 | ld r9, HSTATE_KVM_VCPU(r13) | 1651 | ld r9, HSTATE_KVM_VCPU(r13) |
1652 | ld r10, VCPU_PC(r9) | 1652 | ld r10, VCPU_PC(r9) |
1653 | ld r11, VCPU_MSR(r9) | 1653 | ld r11, VCPU_MSR(r9) |
@@ -1721,7 +1721,7 @@ kvmppc_hisi: | |||
1721 | mr r4, r10 | 1721 | mr r4, r10 |
1722 | mr r6, r11 | 1722 | mr r6, r11 |
1723 | li r7, 0 /* instruction fault */ | 1723 | li r7, 0 /* instruction fault */ |
1724 | bl .kvmppc_hpte_hv_fault | 1724 | bl kvmppc_hpte_hv_fault |
1725 | ld r9, HSTATE_KVM_VCPU(r13) | 1725 | ld r9, HSTATE_KVM_VCPU(r13) |
1726 | ld r10, VCPU_PC(r9) | 1726 | ld r10, VCPU_PC(r9) |
1727 | ld r11, VCPU_MSR(r9) | 1727 | ld r11, VCPU_MSR(r9) |
@@ -2099,7 +2099,7 @@ kvm_cede_exit: | |||
2099 | /* Try to handle a machine check in real mode */ | 2099 | /* Try to handle a machine check in real mode */ |
2100 | machine_check_realmode: | 2100 | machine_check_realmode: |
2101 | mr r3, r9 /* get vcpu pointer */ | 2101 | mr r3, r9 /* get vcpu pointer */ |
2102 | bl .kvmppc_realmode_machine_check | 2102 | bl kvmppc_realmode_machine_check |
2103 | nop | 2103 | nop |
2104 | cmpdi r3, 0 /* continue exiting from guest? */ | 2104 | cmpdi r3, 0 /* continue exiting from guest? */ |
2105 | ld r9, HSTATE_KVM_VCPU(r13) | 2105 | ld r9, HSTATE_KVM_VCPU(r13) |
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S index 9f9434a85264..e59c9c2ebe98 100644 --- a/arch/powerpc/lib/copypage_64.S +++ b/arch/powerpc/lib/copypage_64.S | |||
@@ -20,7 +20,7 @@ _GLOBAL(copy_page) | |||
20 | BEGIN_FTR_SECTION | 20 | BEGIN_FTR_SECTION |
21 | lis r5,PAGE_SIZE@h | 21 | lis r5,PAGE_SIZE@h |
22 | FTR_SECTION_ELSE | 22 | FTR_SECTION_ELSE |
23 | b .copypage_power7 | 23 | b copypage_power7 |
24 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) | 24 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) |
25 | ori r5,r5,PAGE_SIZE@l | 25 | ori r5,r5,PAGE_SIZE@l |
26 | BEGIN_FTR_SECTION | 26 | BEGIN_FTR_SECTION |
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S index 395c594722a2..0f1e2398f83c 100644 --- a/arch/powerpc/lib/copypage_power7.S +++ b/arch/powerpc/lib/copypage_power7.S | |||
@@ -60,7 +60,7 @@ _GLOBAL(copypage_power7) | |||
60 | std r4,56(r1) | 60 | std r4,56(r1) |
61 | std r0,16(r1) | 61 | std r0,16(r1) |
62 | stdu r1,-STACKFRAMESIZE(r1) | 62 | stdu r1,-STACKFRAMESIZE(r1) |
63 | bl .enter_vmx_copy | 63 | bl enter_vmx_copy |
64 | cmpwi r3,0 | 64 | cmpwi r3,0 |
65 | ld r0,STACKFRAMESIZE+16(r1) | 65 | ld r0,STACKFRAMESIZE+16(r1) |
66 | ld r3,STACKFRAMESIZE+48(r1) | 66 | ld r3,STACKFRAMESIZE+48(r1) |
@@ -103,7 +103,7 @@ _GLOBAL(copypage_power7) | |||
103 | addi r3,r3,128 | 103 | addi r3,r3,128 |
104 | bdnz 1b | 104 | bdnz 1b |
105 | 105 | ||
106 | b .exit_vmx_copy /* tail call optimise */ | 106 | b exit_vmx_copy /* tail call optimise */ |
107 | 107 | ||
108 | #else | 108 | #else |
109 | li r0,(PAGE_SIZE/128) | 109 | li r0,(PAGE_SIZE/128) |
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S index e8e9c36dc784..62f0540418b9 100644 --- a/arch/powerpc/lib/copyuser_power7.S +++ b/arch/powerpc/lib/copyuser_power7.S | |||
@@ -66,7 +66,7 @@ | |||
66 | ld r15,STK_REG(R15)(r1) | 66 | ld r15,STK_REG(R15)(r1) |
67 | ld r14,STK_REG(R14)(r1) | 67 | ld r14,STK_REG(R14)(r1) |
68 | .Ldo_err3: | 68 | .Ldo_err3: |
69 | bl .exit_vmx_usercopy | 69 | bl exit_vmx_usercopy |
70 | ld r0,STACKFRAMESIZE+16(r1) | 70 | ld r0,STACKFRAMESIZE+16(r1) |
71 | mtlr r0 | 71 | mtlr r0 |
72 | b .Lexit | 72 | b .Lexit |
@@ -295,7 +295,7 @@ err1; stb r0,0(r3) | |||
295 | mflr r0 | 295 | mflr r0 |
296 | std r0,16(r1) | 296 | std r0,16(r1) |
297 | stdu r1,-STACKFRAMESIZE(r1) | 297 | stdu r1,-STACKFRAMESIZE(r1) |
298 | bl .enter_vmx_usercopy | 298 | bl enter_vmx_usercopy |
299 | cmpwi cr1,r3,0 | 299 | cmpwi cr1,r3,0 |
300 | ld r0,STACKFRAMESIZE+16(r1) | 300 | ld r0,STACKFRAMESIZE+16(r1) |
301 | ld r3,STACKFRAMESIZE+48(r1) | 301 | ld r3,STACKFRAMESIZE+48(r1) |
@@ -514,7 +514,7 @@ err3; lbz r0,0(r4) | |||
514 | err3; stb r0,0(r3) | 514 | err3; stb r0,0(r3) |
515 | 515 | ||
516 | 15: addi r1,r1,STACKFRAMESIZE | 516 | 15: addi r1,r1,STACKFRAMESIZE |
517 | b .exit_vmx_usercopy /* tail call optimise */ | 517 | b exit_vmx_usercopy /* tail call optimise */ |
518 | 518 | ||
519 | .Lvmx_unaligned_copy: | 519 | .Lvmx_unaligned_copy: |
520 | /* Get the destination 16B aligned */ | 520 | /* Get the destination 16B aligned */ |
@@ -717,5 +717,5 @@ err3; lbz r0,0(r4) | |||
717 | err3; stb r0,0(r3) | 717 | err3; stb r0,0(r3) |
718 | 718 | ||
719 | 15: addi r1,r1,STACKFRAMESIZE | 719 | 15: addi r1,r1,STACKFRAMESIZE |
720 | b .exit_vmx_usercopy /* tail call optimise */ | 720 | b exit_vmx_usercopy /* tail call optimise */ |
721 | #endif /* CONFIG_ALTIVEC */ | 721 | #endif /* CONFIG_ALTIVEC */ |
diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S index 9b96ff2ecd4d..19e66001a4f9 100644 --- a/arch/powerpc/lib/hweight_64.S +++ b/arch/powerpc/lib/hweight_64.S | |||
@@ -24,7 +24,7 @@ | |||
24 | 24 | ||
25 | _GLOBAL(__arch_hweight8) | 25 | _GLOBAL(__arch_hweight8) |
26 | BEGIN_FTR_SECTION | 26 | BEGIN_FTR_SECTION |
27 | b .__sw_hweight8 | 27 | b __sw_hweight8 |
28 | nop | 28 | nop |
29 | nop | 29 | nop |
30 | FTR_SECTION_ELSE | 30 | FTR_SECTION_ELSE |
@@ -35,7 +35,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) | |||
35 | 35 | ||
36 | _GLOBAL(__arch_hweight16) | 36 | _GLOBAL(__arch_hweight16) |
37 | BEGIN_FTR_SECTION | 37 | BEGIN_FTR_SECTION |
38 | b .__sw_hweight16 | 38 | b __sw_hweight16 |
39 | nop | 39 | nop |
40 | nop | 40 | nop |
41 | nop | 41 | nop |
@@ -57,7 +57,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) | |||
57 | 57 | ||
58 | _GLOBAL(__arch_hweight32) | 58 | _GLOBAL(__arch_hweight32) |
59 | BEGIN_FTR_SECTION | 59 | BEGIN_FTR_SECTION |
60 | b .__sw_hweight32 | 60 | b __sw_hweight32 |
61 | nop | 61 | nop |
62 | nop | 62 | nop |
63 | nop | 63 | nop |
@@ -82,7 +82,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) | |||
82 | 82 | ||
83 | _GLOBAL(__arch_hweight64) | 83 | _GLOBAL(__arch_hweight64) |
84 | BEGIN_FTR_SECTION | 84 | BEGIN_FTR_SECTION |
85 | b .__sw_hweight64 | 85 | b __sw_hweight64 |
86 | nop | 86 | nop |
87 | nop | 87 | nop |
88 | nop | 88 | nop |
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S index f4fcb0bc6563..0738f96befbf 100644 --- a/arch/powerpc/lib/mem_64.S +++ b/arch/powerpc/lib/mem_64.S | |||
@@ -79,8 +79,8 @@ _GLOBAL(memset) | |||
79 | 79 | ||
80 | _GLOBAL(memmove) | 80 | _GLOBAL(memmove) |
81 | cmplw 0,r3,r4 | 81 | cmplw 0,r3,r4 |
82 | bgt .backwards_memcpy | 82 | bgt backwards_memcpy |
83 | b .memcpy | 83 | b memcpy |
84 | 84 | ||
85 | _GLOBAL(backwards_memcpy) | 85 | _GLOBAL(backwards_memcpy) |
86 | rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */ | 86 | rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */ |
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S index e4177dbea6bd..bae3f214c2d9 100644 --- a/arch/powerpc/lib/memcpy_power7.S +++ b/arch/powerpc/lib/memcpy_power7.S | |||
@@ -230,7 +230,7 @@ _GLOBAL(memcpy_power7) | |||
230 | std r5,64(r1) | 230 | std r5,64(r1) |
231 | std r0,16(r1) | 231 | std r0,16(r1) |
232 | stdu r1,-STACKFRAMESIZE(r1) | 232 | stdu r1,-STACKFRAMESIZE(r1) |
233 | bl .enter_vmx_copy | 233 | bl enter_vmx_copy |
234 | cmpwi cr1,r3,0 | 234 | cmpwi cr1,r3,0 |
235 | ld r0,STACKFRAMESIZE+16(r1) | 235 | ld r0,STACKFRAMESIZE+16(r1) |
236 | ld r3,STACKFRAMESIZE+48(r1) | 236 | ld r3,STACKFRAMESIZE+48(r1) |
@@ -448,7 +448,7 @@ _GLOBAL(memcpy_power7) | |||
448 | 448 | ||
449 | 15: addi r1,r1,STACKFRAMESIZE | 449 | 15: addi r1,r1,STACKFRAMESIZE |
450 | ld r3,48(r1) | 450 | ld r3,48(r1) |
451 | b .exit_vmx_copy /* tail call optimise */ | 451 | b exit_vmx_copy /* tail call optimise */ |
452 | 452 | ||
453 | .Lvmx_unaligned_copy: | 453 | .Lvmx_unaligned_copy: |
454 | /* Get the destination 16B aligned */ | 454 | /* Get the destination 16B aligned */ |
@@ -652,5 +652,5 @@ _GLOBAL(memcpy_power7) | |||
652 | 652 | ||
653 | 15: addi r1,r1,STACKFRAMESIZE | 653 | 15: addi r1,r1,STACKFRAMESIZE |
654 | ld r3,48(r1) | 654 | ld r3,48(r1) |
655 | b .exit_vmx_copy /* tail call optimise */ | 655 | b exit_vmx_copy /* tail call optimise */ |
656 | #endif /* CONFIG_ALTIVEC */ | 656 | #endif /* CONFIG_ALTIVEC */ |
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S index 1136d26a95ae..8bf7537a7f53 100644 --- a/arch/powerpc/mm/hash_low_64.S +++ b/arch/powerpc/mm/hash_low_64.S | |||
@@ -159,7 +159,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) | |||
159 | BEGIN_FTR_SECTION | 159 | BEGIN_FTR_SECTION |
160 | mr r4,r30 | 160 | mr r4,r30 |
161 | mr r5,r7 | 161 | mr r5,r7 |
162 | bl .hash_page_do_lazy_icache | 162 | bl hash_page_do_lazy_icache |
163 | END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) | 163 | END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) |
164 | 164 | ||
165 | /* At this point, r3 contains new PP bits, save them in | 165 | /* At this point, r3 contains new PP bits, save them in |
@@ -471,7 +471,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) | |||
471 | BEGIN_FTR_SECTION | 471 | BEGIN_FTR_SECTION |
472 | mr r4,r30 | 472 | mr r4,r30 |
473 | mr r5,r7 | 473 | mr r5,r7 |
474 | bl .hash_page_do_lazy_icache | 474 | bl hash_page_do_lazy_icache |
475 | END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) | 475 | END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) |
476 | 476 | ||
477 | /* At this point, r3 contains new PP bits, save them in | 477 | /* At this point, r3 contains new PP bits, save them in |
@@ -588,7 +588,7 @@ htab_inval_old_hpte: | |||
588 | li r6,MMU_PAGE_64K /* psize */ | 588 | li r6,MMU_PAGE_64K /* psize */ |
589 | ld r7,STK_PARAM(R9)(r1) /* ssize */ | 589 | ld r7,STK_PARAM(R9)(r1) /* ssize */ |
590 | ld r8,STK_PARAM(R8)(r1) /* local */ | 590 | ld r8,STK_PARAM(R8)(r1) /* local */ |
591 | bl .flush_hash_page | 591 | bl flush_hash_page |
592 | /* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */ | 592 | /* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */ |
593 | lis r0,_PAGE_HPTE_SUB@h | 593 | lis r0,_PAGE_HPTE_SUB@h |
594 | ori r0,r0,_PAGE_HPTE_SUB@l | 594 | ori r0,r0,_PAGE_HPTE_SUB@l |
@@ -812,7 +812,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) | |||
812 | BEGIN_FTR_SECTION | 812 | BEGIN_FTR_SECTION |
813 | mr r4,r30 | 813 | mr r4,r30 |
814 | mr r5,r7 | 814 | mr r5,r7 |
815 | bl .hash_page_do_lazy_icache | 815 | bl hash_page_do_lazy_icache |
816 | END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) | 816 | END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) |
817 | 817 | ||
818 | /* At this point, r3 contains new PP bits, save them in | 818 | /* At this point, r3 contains new PP bits, save them in |
diff --git a/arch/powerpc/platforms/pasemi/powersave.S b/arch/powerpc/platforms/pasemi/powersave.S index 56f45adcd089..81ab555aa491 100644 --- a/arch/powerpc/platforms/pasemi/powersave.S +++ b/arch/powerpc/platforms/pasemi/powersave.S | |||
@@ -66,7 +66,7 @@ sleep_common: | |||
66 | std r3, 48(r1) | 66 | std r3, 48(r1) |
67 | 67 | ||
68 | /* Only do power savings when in astate 0 */ | 68 | /* Only do power savings when in astate 0 */ |
69 | bl .check_astate | 69 | bl check_astate |
70 | cmpwi r3,0 | 70 | cmpwi r3,0 |
71 | bne 1f | 71 | bne 1f |
72 | 72 | ||
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index 444fe7759e55..7891a86066e8 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S | |||
@@ -49,7 +49,7 @@ END_FTR_SECTION(0, 1); \ | |||
49 | std r0,16(r1); \ | 49 | std r0,16(r1); \ |
50 | addi r4,r1,STK_PARAM(FIRST_REG); \ | 50 | addi r4,r1,STK_PARAM(FIRST_REG); \ |
51 | stdu r1,-STACK_FRAME_OVERHEAD(r1); \ | 51 | stdu r1,-STACK_FRAME_OVERHEAD(r1); \ |
52 | bl .__trace_hcall_entry; \ | 52 | bl __trace_hcall_entry; \ |
53 | addi r1,r1,STACK_FRAME_OVERHEAD; \ | 53 | addi r1,r1,STACK_FRAME_OVERHEAD; \ |
54 | ld r0,16(r1); \ | 54 | ld r0,16(r1); \ |
55 | ld r3,STK_PARAM(R3)(r1); \ | 55 | ld r3,STK_PARAM(R3)(r1); \ |
@@ -83,7 +83,7 @@ END_FTR_SECTION(0, 1); \ | |||
83 | mr r3,r6; \ | 83 | mr r3,r6; \ |
84 | std r0,16(r1); \ | 84 | std r0,16(r1); \ |
85 | stdu r1,-STACK_FRAME_OVERHEAD(r1); \ | 85 | stdu r1,-STACK_FRAME_OVERHEAD(r1); \ |
86 | bl .__trace_hcall_exit; \ | 86 | bl __trace_hcall_exit; \ |
87 | addi r1,r1,STACK_FRAME_OVERHEAD; \ | 87 | addi r1,r1,STACK_FRAME_OVERHEAD; \ |
88 | ld r0,16(r1); \ | 88 | ld r0,16(r1); \ |
89 | ld r3,STK_PARAM(R3)(r1); \ | 89 | ld r3,STK_PARAM(R3)(r1); \ |