Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/align.c	2
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	10
-rw-r--r--	arch/powerpc/kernel/entry_64.S	89
-rw-r--r--	arch/powerpc/kernel/exceptions-64s.S	77
-rw-r--r--	arch/powerpc/kernel/fpu.S	73
-rw-r--r--	arch/powerpc/kernel/head_fsl_booke.S	42
-rw-r--r--	arch/powerpc/kernel/idle_power7.S	7
-rw-r--r--	arch/powerpc/kernel/misc_32.S	2
-rw-r--r--	arch/powerpc/kernel/module_32.c	6
-rw-r--r--	arch/powerpc/kernel/module_64.c	27
-rw-r--r--	arch/powerpc/kernel/nvram_64.c	19
-rw-r--r--	arch/powerpc/kernel/ppc_ksyms.c	6
-rw-r--r--	arch/powerpc/kernel/process.c	541
-rw-r--r--	arch/powerpc/kernel/prom_init.c	1
-rw-r--r--	arch/powerpc/kernel/ptrace.c	1
-rw-r--r--	arch/powerpc/kernel/rtas.c	59
-rw-r--r--	arch/powerpc/kernel/signal_32.c	22
-rw-r--r--	arch/powerpc/kernel/signal_64.c	22
-rw-r--r--	arch/powerpc/kernel/stacktrace.c	7
-rw-r--r--	arch/powerpc/kernel/swsusp.c	4
-rw-r--r--	arch/powerpc/kernel/systbl_chk.c	2
-rw-r--r--	arch/powerpc/kernel/systbl_chk.sh	2
-rw-r--r--	arch/powerpc/kernel/time.c	36
-rw-r--r--	arch/powerpc/kernel/traps.c	7
-rw-r--r--	arch/powerpc/kernel/vdso.c	2
-rw-r--r--	arch/powerpc/kernel/vdso32/datapage.S	2
-rw-r--r--	arch/powerpc/kernel/vdso64/datapage.S	2
-rw-r--r--	arch/powerpc/kernel/vector.S	112
28 files changed, 484 insertions, 698 deletions
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 86150fbb42c3..8e7cb8e2b21a 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -960,6 +960,7 @@ int fix_alignment(struct pt_regs *regs)
 		preempt_disable();
 		enable_kernel_fp();
 		cvt_df(&data.dd, (float *)&data.x32.low32);
+		disable_kernel_fp();
 		preempt_enable();
 #else
 		return 0;
@@ -1000,6 +1001,7 @@ int fix_alignment(struct pt_regs *regs)
 		preempt_disable();
 		enable_kernel_fp();
 		cvt_fd((float *)&data.x32.low32, &data.dd);
+		disable_kernel_fp();
 		preempt_enable();
 #else
 		return 0;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 221d584d089f..07cebc3514f3 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -185,14 +185,16 @@ int main(void)
 	DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
 	DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
 	DEFINE(PACAIRQHAPPENED, offsetof(struct paca_struct, irq_happened));
-	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
+#ifdef CONFIG_PPC_BOOK3S
+	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, mm_ctx_id));
 #ifdef CONFIG_PPC_MM_SLICES
 	DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
-					    context.low_slices_psize));
+					    mm_ctx_low_slices_psize));
 	DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct,
-					    context.high_slices_psize));
+					    mm_ctx_high_slices_psize));
 	DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
 #endif /* CONFIG_PPC_MM_SLICES */
+#endif
 
 #ifdef CONFIG_PPC_BOOK3E
 	DEFINE(PACAPGD, offsetof(struct paca_struct, pgd));
@@ -222,7 +224,7 @@ int main(void)
 #ifdef CONFIG_PPC_MM_SLICES
 	DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp));
 #else
-	DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
+	DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, mm_ctx_sllp));
 #endif /* CONFIG_PPC_MM_SLICES */
 	DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
 	DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index a94f155db78e..0d525ce3717f 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -223,7 +223,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 
 	beq-	1f
 	ACCOUNT_CPU_USER_EXIT(r11, r12)
-	HMT_MEDIUM_LOW_HAS_PPR
+
+BEGIN_FTR_SECTION
+	HMT_MEDIUM_LOW
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
 	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
 1:	ld	r2,GPR2(r1)
 	ld	r1,GPR1(r1)
@@ -312,7 +316,13 @@ syscall_exit_work:
 	subi	r12,r12,TI_FLAGS
 
 4:	/* Anything else left to do? */
-	SET_DEFAULT_THREAD_PPR(r3, r10)	/* Set thread.ppr = 3 */
+BEGIN_FTR_SECTION
+	lis	r3,INIT_PPR@highest	/* Set thread.ppr = 3 */
+	ld	r10,PACACURRENT(r13)
+	sldi	r3,r3,32	/* bits 11-13 are used for ppr */
+	std	r3,TASKTHREADPPR(r10)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
 	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
 	beq	ret_from_except_lite
 
@@ -452,43 +462,11 @@ _GLOBAL(_switch)
 	/* r3-r13 are caller saved -- Cort */
 	SAVE_8GPRS(14, r1)
 	SAVE_10GPRS(22, r1)
-	mflr	r20		/* Return to switch caller */
-	mfmsr	r22
-	li	r0, MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
-	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
-	mfspr	r24,SPRN_VRSAVE		/* save vrsave register value */
-	std	r24,THREAD_VRSAVE(r3)
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-	and.	r0,r0,r22
-	beq+	1f
-	andc	r22,r22,r0
-	MTMSRD(r22)
-	isync
-1:	std	r20,_NIP(r1)
+	std	r0,_NIP(r1)	/* Return to switch caller */
 	mfcr	r23
 	std	r23,_CCR(r1)
 	std	r1,KSP(r3)	/* Set old stack pointer */
 
-#ifdef CONFIG_PPC_BOOK3S_64
-BEGIN_FTR_SECTION
-	/* Event based branch registers */
-	mfspr	r0, SPRN_BESCR
-	std	r0, THREAD_BESCR(r3)
-	mfspr	r0, SPRN_EBBHR
-	std	r0, THREAD_EBBHR(r3)
-	mfspr	r0, SPRN_EBBRR
-	std	r0, THREAD_EBBRR(r3)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-#endif
-
 #ifdef CONFIG_SMP
 	/* We need a sync somewhere here to make sure that if the
 	 * previous task gets rescheduled on another CPU, it sees all
@@ -576,47 +554,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	mr	r1,r8		/* start using new stack pointer */
 	std	r7,PACAKSAVE(r13)
 
-#ifdef CONFIG_PPC_BOOK3S_64
-BEGIN_FTR_SECTION
-	/* Event based branch registers */
-	ld	r0, THREAD_BESCR(r4)
-	mtspr	SPRN_BESCR, r0
-	ld	r0, THREAD_EBBHR(r4)
-	mtspr	SPRN_EBBHR, r0
-	ld	r0, THREAD_EBBRR(r4)
-	mtspr	SPRN_EBBRR, r0
-
-	ld	r0,THREAD_TAR(r4)
-	mtspr	SPRN_TAR,r0
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-#endif
-
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
-	ld	r0,THREAD_VRSAVE(r4)
-	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_PPC64
-BEGIN_FTR_SECTION
-	lwz	r6,THREAD_DSCR_INHERIT(r4)
-	ld	r0,THREAD_DSCR(r4)
-	cmpwi	r6,0
-	bne	1f
-	ld	r0,PACA_DSCR_DEFAULT(r13)
-1:
-BEGIN_FTR_SECTION_NESTED(70)
-	mfspr	r8, SPRN_FSCR
-	rldimi	r8, r6, FSCR_DSCR_LG, (63 - FSCR_DSCR_LG)
-	mtspr	SPRN_FSCR, r8
-END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
-	cmpd	r0,r25
-	beq	2f
-	mtspr	SPRN_DSCR,r0
-2:
-END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
-#endif
-
 	ld	r6,_CCR(r1)
 	mtcrf	0xFF,r6
 
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 0a0399c2af11..7716cebf4b8e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -96,7 +96,6 @@ __start_interrupts:
 
 	.globl system_reset_pSeries;
 system_reset_pSeries:
-	HMT_MEDIUM_PPR_DISCARD
 	SET_SCRATCH0(r13)
 #ifdef CONFIG_PPC_P7_NAP
 BEGIN_FTR_SECTION
@@ -164,7 +163,6 @@ machine_check_pSeries_1:
 	 * some code path might still want to branch into the original
 	 * vector
 	 */
-	HMT_MEDIUM_PPR_DISCARD
 	SET_SCRATCH0(r13)		/* save r13 */
 #ifdef CONFIG_PPC_P7_NAP
 BEGIN_FTR_SECTION
@@ -199,7 +197,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
 	. = 0x300
 	.globl data_access_pSeries
 data_access_pSeries:
-	HMT_MEDIUM_PPR_DISCARD
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
 				 KVMTEST, 0x300)
@@ -207,7 +204,6 @@ data_access_pSeries:
 	. = 0x380
 	.globl data_access_slb_pSeries
 data_access_slb_pSeries:
-	HMT_MEDIUM_PPR_DISCARD
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXSLB)
 	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
@@ -234,15 +230,14 @@ data_access_slb_pSeries:
 	bctr
 #endif
 
-	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
+	STD_EXCEPTION_PSERIES(0x400, instruction_access)
 
 	. = 0x480
 	.globl instruction_access_slb_pSeries
 instruction_access_slb_pSeries:
-	HMT_MEDIUM_PPR_DISCARD
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXSLB)
-	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
+	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x480)
 	std	r3,PACA_EXSLB+EX_R3(r13)
 	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
 #ifdef __DISABLED__
@@ -269,25 +264,24 @@ instruction_access_slb_pSeries:
 	.globl hardware_interrupt_hv;
 hardware_interrupt_pSeries:
 hardware_interrupt_hv:
-	HMT_MEDIUM_PPR_DISCARD
 	BEGIN_FTR_SECTION
 		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
 					    EXC_HV, SOFTEN_TEST_HV)
 		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
 	FTR_SECTION_ELSE
 		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
-					    EXC_STD, SOFTEN_TEST_HV_201)
+					    EXC_STD, SOFTEN_TEST_PR)
 		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
 	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 
-	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
+	STD_EXCEPTION_PSERIES(0x600, alignment)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x600)
 
-	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
+	STD_EXCEPTION_PSERIES(0x700, program_check)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x700)
 
-	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
+	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x800)
 
 	. = 0x900
 	.globl decrementer_pSeries
@@ -297,10 +291,10 @@ decrementer_pSeries:
 	STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
 
 	MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xa00)
 
-	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
+	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xb00)
 
 	. = 0xc00
 	.globl	system_call_pSeries
@@ -331,8 +325,8 @@ system_call_pSeries:
 	SYSCALL_PSERIES_3
 	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
 
-	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
+	STD_EXCEPTION_PSERIES(0xd00, single_step)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xd00)
 
 	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
 	 * out of line to handle them
@@ -407,13 +401,12 @@ hv_facility_unavailable_trampoline:
 	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
 #endif /* CONFIG_CBE_RAS */
 
-	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
-	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
+	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
+	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
 
 	. = 0x1500
 	.global denorm_exception_hv
 denorm_exception_hv:
-	HMT_MEDIUM_PPR_DISCARD
 	mtspr	SPRN_SPRG_HSCRATCH0,r13
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
@@ -435,8 +428,8 @@ denorm_exception_hv:
 	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
 #endif /* CONFIG_CBE_RAS */
 
-	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
+	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x1700)
 
 #ifdef CONFIG_CBE_RAS
 	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
@@ -527,7 +520,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 machine_check_pSeries:
 	.globl machine_check_fwnmi
 machine_check_fwnmi:
-	HMT_MEDIUM_PPR_DISCARD
 	SET_SCRATCH0(r13)		/* save r13 */
 	EXCEPTION_PROLOG_0(PACA_EXMC)
 machine_check_pSeries_0:
@@ -536,9 +528,9 @@ machine_check_pSeries_0:
 	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
 	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
 	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
-	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x400)
+	KVM_HANDLER(PACA_EXSLB, EXC_STD, 0x480)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x900)
 	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
 
 #ifdef CONFIG_PPC_DENORMALISATION
@@ -621,13 +613,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 
 	/* moved from 0xf00 */
 	STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf00)
 	STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf20)
 	STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf40)
 	STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf60)
 	STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
 	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
 
@@ -711,7 +703,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
 	.globl system_reset_fwnmi
 	.align 7
 system_reset_fwnmi:
-	HMT_MEDIUM_PPR_DISCARD
 	SET_SCRATCH0(r13)		/* save r13 */
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
 				 NOTEST, 0x100)
@@ -1556,29 +1547,19 @@ do_hash_page:
 	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
 	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
 	bne	77f			/* then don't call hash_page now */
-	/*
-	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
-	 * accessing a userspace segment (even from the kernel). We assume
-	 * kernel addresses always have the high bit set.
-	 */
-	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
-	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
-	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
-	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
-	ori	r4,r4,1			/* add _PAGE_PRESENT */
-	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
 
 	/*
 	 * r3 contains the faulting address
-	 * r4 contains the required access permissions
+	 * r4 msr
 	 * r5 contains the trap number
 	 * r6 contains dsisr
 	 *
 	 * at return r3 = 0 for success, 1 for page fault, negative for error
 	 */
+	mr	r4,r12
 	ld	r6,_DSISR(r1)
-	bl	hash_page		/* build HPTE if possible */
-	cmpdi	r3,0			/* see if hash_page succeeded */
+	bl	__hash_page		/* build HPTE if possible */
+	cmpdi	r3,0			/* see if __hash_page succeeded */
 
 	/* Success */
 	beq	fast_exc_return_irq	/* Return from exception on success */
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 9ad236e5d2c9..2117eaca3d28 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -73,30 +73,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	MTFSF_L(fr0)
 	REST_32FPVSRS(0, R4, R7)
 
-	/* FP/VSX off again */
-	MTMSRD(r6)
-	SYNC
-
 	blr
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
 /*
- * Enable use of the FPU, and VSX if possible, for the caller.
- */
-_GLOBAL(fp_enable)
-	mfmsr	r3
-	ori	r3,r3,MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	oris	r3,r3,MSR_VSX@h
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
-	SYNC
-	MTMSRD(r3)
-	isync			/* (not necessary for arch 2.02 and later) */
-	blr
-
-/*
  * Load state from memory into FP registers including FPSCR.
  * Assumes the caller has enabled FP in the MSR.
  */
@@ -136,31 +116,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	SYNC
 	MTMSRD(r5)			/* enable use of fpu now */
 	isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_fpu in switch_to.
- */
-#ifndef CONFIG_SMP
-	LOAD_REG_ADDRBASE(r3, last_task_used_math)
-	toreal(r3)
-	PPC_LL	r4,ADDROFF(last_task_used_math)(r3)
-	PPC_LCMPI	0,r4,0
-	beq	1f
-	toreal(r4)
-	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
-	addi	r10,r4,THREAD_FPSTATE
-	SAVE_32FPVSRS(0, R5, R10)
-	mffs	fr0
-	stfd	fr0,FPSTATE_FPSCR(r10)
-	PPC_LL	r5,PT_REGS(r4)
-	toreal(r5)
-	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li	r10,MSR_FP|MSR_FE0|MSR_FE1
-	andc	r4,r4,r10	/* disable FP for previous task */
-	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
 	/* enable use of FP after return */
 #ifdef CONFIG_PPC32
 	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
@@ -179,36 +134,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	lfd	fr0,FPSTATE_FPSCR(r10)
 	MTFSF_L(fr0)
 	REST_32FPVSRS(0, R4, R10)
-#ifndef CONFIG_SMP
-	subi	r4,r5,THREAD
-	fromreal(r4)
-	PPC_STL	r4,ADDROFF(last_task_used_math)(r3)
-#endif /* CONFIG_SMP */
 	/* restore registers and return */
 	/* we haven't used ctr or xer or lr */
 	blr
 
 /*
- * giveup_fpu(tsk)
+ * __giveup_fpu(tsk)
  * Disable FP for the task given as the argument,
  * and save the floating-point registers in its thread_struct.
  * Enables the FPU for use in the kernel on return.
  */
-_GLOBAL(giveup_fpu)
-	mfmsr	r5
-	ori	r5,r5,MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	oris	r5,r5,MSR_VSX@h
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
-	SYNC_601
-	ISYNC_601
-	MTMSRD(r5)			/* enable use of fpu now */
-	SYNC_601
-	isync
-	PPC_LCMPI	0,r3,0
-	beqlr-				/* if no previous owner, done */
+_GLOBAL(__giveup_fpu)
 	addi	r3,r3,THREAD		/* want THREAD of task */
 	PPC_LL	r6,THREAD_FPSAVEAREA(r3)
 	PPC_LL	r5,PT_REGS(r3)
@@ -230,11 +166,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	andc	r4,r4,r3		/* disable FP for previous task */
 	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
-#ifndef CONFIG_SMP
-	li	r5,0
-	LOAD_REG_ADDRBASE(r4,last_task_used_math)
-	PPC_STL	r5,ADDROFF(last_task_used_math)(r4)
-#endif /* CONFIG_SMP */
 	blr
 
 /*
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index fffd1f96bb1d..f705171b924b 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -857,29 +857,6 @@ _GLOBAL(load_up_spe)
 	oris	r5,r5,MSR_SPE@h
 	mtmsr	r5			/* enable use of SPE now */
 	isync
-/*
- * For SMP, we don't do lazy SPE switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_spe in switch_to.
- */
-#ifndef CONFIG_SMP
-	lis	r3,last_task_used_spe@ha
-	lwz	r4,last_task_used_spe@l(r3)
-	cmpi	0,r4,0
-	beq	1f
-	addi	r4,r4,THREAD	/* want THREAD of last_task_used_spe */
-	SAVE_32EVRS(0,r10,r4,THREAD_EVR0)
-	evxor	evr10, evr10, evr10	/* clear out evr10 */
-	evmwumiaa evr10, evr10, evr10	/* evr10 <- ACC = 0 * 0 + ACC */
-	li	r5,THREAD_ACC
-	evstddx	evr10, r4, r5		/* save off accumulator */
-	lwz	r5,PT_REGS(r4)
-	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis	r10,MSR_SPE@h
-	andc	r4,r4,r10	/* disable SPE for previous task */
-	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* !CONFIG_SMP */
 	/* enable use of SPE after return */
 	oris	r9,r9,MSR_SPE@h
 	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
@@ -889,10 +866,6 @@ _GLOBAL(load_up_spe)
 	evlddx	evr4,r10,r5
 	evmra	evr4,evr4
 	REST_32EVRS(0,r10,r5,THREAD_EVR0)
-#ifndef CONFIG_SMP
-	subi	r4,r5,THREAD
-	stw	r4,last_task_used_spe@l(r3)
-#endif /* !CONFIG_SMP */
 	blr
 
 /*
@@ -1011,16 +984,10 @@ _GLOBAL(__setup_ehv_ivors)
 
 #ifdef CONFIG_SPE
 /*
- * extern void giveup_spe(struct task_struct *prev)
+ * extern void __giveup_spe(struct task_struct *prev)
  *
  */
-_GLOBAL(giveup_spe)
-	mfmsr	r5
-	oris	r5,r5,MSR_SPE@h
-	mtmsr	r5			/* enable use of SPE now */
-	isync
-	cmpi	0,r3,0
-	beqlr-				/* if no previous owner, done */
+_GLOBAL(__giveup_spe)
 	addi	r3,r3,THREAD		/* want THREAD of task */
 	lwz	r5,PT_REGS(r3)
 	cmpi	0,r5,0
@@ -1035,11 +1002,6 @@ _GLOBAL(giveup_spe)
 	andc	r4,r4,r3		/* disable SPE for previous task */
 	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
-#ifndef CONFIG_SMP
-	li	r5,0
-	lis	r4,last_task_used_spe@ha
-	stw	r5,last_task_used_spe@l(r4)
-#endif /* !CONFIG_SMP */
 	blr
 #endif /* CONFIG_SPE */
 
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 112ccf497562..cf4fb5429cf1 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -89,13 +89,6 @@ _GLOBAL(power7_powersave_common)
 	std	r0,_LINK(r1)
 	std	r0,_NIP(r1)
 
-#ifndef CONFIG_SMP
-	/* Make sure FPU, VSX etc... are flushed as we may lose
-	 * state when going to nap mode
-	 */
-	bl	discard_lazy_cpu_state
-#endif /* CONFIG_SMP */
-
 	/* Hard disable interrupts */
 	mfmsr	r9
 	rldicl	r9,r9,48,1
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index ed3ab509faca..be8edd67f05b 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -743,6 +743,8 @@ relocate_new_kernel:
 	/* Check for 47x cores */
 	mfspr	r3,SPRN_PVR
 	srwi	r3,r3,16
+	cmplwi	cr0,r3,PVR_476FPE@h
+	beq	setup_map_47x
 	cmplwi	cr0,r3,PVR_476@h
 	beq	setup_map_47x
 	cmplwi	cr0,r3,PVR_476_ISS@h
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index c94d2e018d84..2c01665eb410 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -188,8 +188,8 @@ static uint32_t do_plt_call(void *location,
 
 	pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
 	/* Init, or core PLT? */
-	if (location >= mod->module_core
-	    && location < mod->module_core + mod->core_size)
+	if (location >= mod->core_layout.base
+	    && location < mod->core_layout.base + mod->core_layout.size)
 		entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
 	else
 		entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
@@ -296,7 +296,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 	}
 #ifdef CONFIG_DYNAMIC_FTRACE
 	module->arch.tramp =
-		do_plt_call(module->module_core,
+		do_plt_call(module->core_layout.base,
 			    (unsigned long)ftrace_caller,
 			    sechdrs, module);
 #endif
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 68384514506b..59663af9315f 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -635,6 +635,33 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		 */
 			break;
 
+		case R_PPC64_ENTRY:
+			/*
+			 * Optimize ELFv2 large code model entry point if
+			 * the TOC is within 2GB range of current location.
+			 */
+			value = my_r2(sechdrs, me) - (unsigned long)location;
+			if (value + 0x80008000 > 0xffffffff)
+				break;
+			/*
+			 * Check for the large code model prolog sequence:
+			 *	ld r2, ...(r12)
+			 *	add r2, r2, r12
+			 */
+			if ((((uint32_t *)location)[0] & ~0xfffc)
+			    != 0xe84c0000)
+				break;
+			if (((uint32_t *)location)[1] != 0x7c426214)
+				break;
+			/*
+			 * If found, replace it with:
+			 *	addis r2, r12, (.TOC.-func)@ha
+			 *	addi r2, r12, (.TOC.-func)@l
+			 */
+			((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
+			((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
+			break;
+
 		case R_PPC64_REL16_HA:
 			/* Subtract location pointer */
 			value -= (unsigned long)location;
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 32e26526f7e4..0cab9e8c3794 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/kmsg_dump.h>
+#include <linux/pagemap.h>
 #include <linux/pstore.h>
 #include <linux/zlib.h>
 #include <asm/uaccess.h>
@@ -733,24 +734,10 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
 
 static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin)
 {
-	int size;
-
 	if (ppc_md.nvram_size == NULL)
 		return -ENODEV;
-	size = ppc_md.nvram_size();
-
-	switch (origin) {
-	case 1:
-		offset += file->f_pos;
-		break;
-	case 2:
-		offset += size;
-		break;
-	}
-	if (offset < 0)
-		return -EINVAL;
-	file->f_pos = offset;
-	return file->f_pos;
+	return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE,
+					ppc_md.nvram_size());
 }
 
 
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 202963ee013a..41e1607e800c 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -19,13 +19,11 @@ EXPORT_SYMBOL(_mcount);
 #endif
 
 #ifdef CONFIG_PPC_FPU
-EXPORT_SYMBOL(giveup_fpu);
 EXPORT_SYMBOL(load_fp_state);
 EXPORT_SYMBOL(store_fp_state);
 #endif
 
 #ifdef CONFIG_ALTIVEC
-EXPORT_SYMBOL(giveup_altivec);
 EXPORT_SYMBOL(load_vr_state);
 EXPORT_SYMBOL(store_vr_state);
 #endif
@@ -34,10 +32,6 @@ EXPORT_SYMBOL(store_vr_state);
 EXPORT_SYMBOL_GPL(__giveup_vsx);
 #endif
 
-#ifdef CONFIG_SPE
-EXPORT_SYMBOL(giveup_spe);
-#endif
-
 #ifdef CONFIG_EPAPR_PARAVIRT
 EXPORT_SYMBOL(epapr_hypercall_start);
 #endif
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 646bf4d222c1..dccc87e8fee5 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -67,15 +67,8 @@
 
 extern unsigned long _get_SP(void);
 
-#ifndef CONFIG_SMP
-struct task_struct *last_task_used_math = NULL;
-struct task_struct *last_task_used_altivec = NULL;
-struct task_struct *last_task_used_vsx = NULL;
-struct task_struct *last_task_used_spe = NULL;
-#endif
-
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-void giveup_fpu_maybe_transactional(struct task_struct *tsk)
+static void check_if_tm_restore_required(struct task_struct *tsk)
 {
 	/*
 	 * If we are saving the current thread's registers, and the
@@ -89,34 +82,67 @@ void giveup_fpu_maybe_transactional(struct task_struct *tsk)
 		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
 		set_thread_flag(TIF_RESTORE_TM);
 	}
+}
+#else
+static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
+bool strict_msr_control;
+EXPORT_SYMBOL(strict_msr_control);
+
+static int __init enable_strict_msr_control(char *str)
+{
+	strict_msr_control = true;
+	pr_info("Enabling strict facility control\n");
 
-	giveup_fpu(tsk);
+	return 0;
 }
+early_param("ppc_strict_facility_enable", enable_strict_msr_control);
 
-void giveup_altivec_maybe_transactional(struct task_struct *tsk)
+void msr_check_and_set(unsigned long bits)
 {
-	/*
-	 * If we are saving the current thread's registers, and the
-	 * thread is in a transactional state, set the TIF_RESTORE_TM
-	 * bit so that we know to restore the registers before
-	 * returning to userspace.
-	 */
-	if (tsk == current && tsk->thread.regs &&
-	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
-	    !test_thread_flag(TIF_RESTORE_TM)) {
-		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
-		set_thread_flag(TIF_RESTORE_TM);
-	}
+	unsigned long oldmsr = mfmsr();
+	unsigned long newmsr;
 
-	giveup_altivec(tsk);
+	newmsr = oldmsr | bits;
+
+#ifdef CONFIG_VSX
+	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
+		newmsr |= MSR_VSX;
+#endif
+
+	if (oldmsr != newmsr)
+		mtmsr_isync(newmsr);
 }
 
-#else
-#define giveup_fpu_maybe_transactional(tsk)	giveup_fpu(tsk)
-#define giveup_altivec_maybe_transactional(tsk)	giveup_altivec(tsk)
-#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+void __msr_check_and_clear(unsigned long bits)
+{
+	unsigned long oldmsr = mfmsr();
+	unsigned long newmsr;
+
+	newmsr = oldmsr & ~bits;
+
+#ifdef CONFIG_VSX
+	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
+		newmsr &= ~MSR_VSX;
+#endif
+
+	if (oldmsr != newmsr)
+		mtmsr_isync(newmsr);
+}
+EXPORT_SYMBOL(__msr_check_and_clear);
 
 #ifdef CONFIG_PPC_FPU
+void giveup_fpu(struct task_struct *tsk)
+{
+	check_if_tm_restore_required(tsk);
+
+	msr_check_and_set(MSR_FP);
+	__giveup_fpu(tsk);
+	msr_check_and_clear(MSR_FP);
+}
+EXPORT_SYMBOL(giveup_fpu);
+
 /*
  * Make sure the floating-point register state in the
  * the thread_struct is up to date for task tsk.
@@ -134,52 +160,56 @@ void flush_fp_to_thread(struct task_struct *tsk)
 		 */
 		preempt_disable();
 		if (tsk->thread.regs->msr & MSR_FP) {
-#ifdef CONFIG_SMP
 			/*
 			 * This should only ever be called for current or
 			 * for a stopped child process.  Since we save away
-			 * the FP register state on context switch on SMP,
+			 * the FP register state on context switch,
 			 * there is something wrong if a stopped child appears
 			 * to still have its FP state in the CPU registers.
 			 */
 			BUG_ON(tsk != current);
-#endif
-			giveup_fpu_maybe_transactional(tsk);
+			giveup_fpu(tsk);
 		}
 		preempt_enable();
 	}
 }
 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
-#endif /* CONFIG_PPC_FPU */
 
 void enable_kernel_fp(void)
 {
 	WARN_ON(preemptible());
 
-#ifdef CONFIG_SMP
-	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
-		giveup_fpu_maybe_transactional(current);
-	else
-		giveup_fpu(NULL);	/* just enables FP for kernel */
-#else
-	giveup_fpu_maybe_transactional(last_task_used_math);
-#endif /* CONFIG_SMP */
+	msr_check_and_set(MSR_FP);
+
+	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
+		check_if_tm_restore_required(current);
+		__giveup_fpu(current);
+	}
 }
 EXPORT_SYMBOL(enable_kernel_fp);
+#endif /* CONFIG_PPC_FPU */
 
 #ifdef CONFIG_ALTIVEC
+void giveup_altivec(struct task_struct *tsk)
+{
+	check_if_tm_restore_required(tsk);
+
+	msr_check_and_set(MSR_VEC);
+	__giveup_altivec(tsk);
+	msr_check_and_clear(MSR_VEC);
+}
+EXPORT_SYMBOL(giveup_altivec);
+
 void enable_kernel_altivec(void)
 {
 	WARN_ON(preemptible());
 
-#ifdef CONFIG_SMP
-	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
-		giveup_altivec_maybe_transactional(current);
-	else
-		giveup_altivec_notask();
-#else
-	giveup_altivec_maybe_transactional(last_task_used_altivec);
-#endif /* CONFIG_SMP */
+	msr_check_and_set(MSR_VEC);
+
+	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
+		check_if_tm_restore_required(current);
+		__giveup_altivec(current);
+	}
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
 
@@ -192,10 +222,8 @@ void flush_altivec_to_thread(struct task_struct *tsk)
 	if (tsk->thread.regs) {
 		preempt_disable();
 		if (tsk->thread.regs->msr & MSR_VEC) {
-#ifdef CONFIG_SMP
 			BUG_ON(tsk != current);
-#endif
-			giveup_altivec_maybe_transactional(tsk);
+			giveup_altivec(tsk);
 		}
 		preempt_enable();
 	}
@@ -204,37 +232,43 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
-void enable_kernel_vsx(void)
+void giveup_vsx(struct task_struct *tsk)
 {
-	WARN_ON(preemptible());
+	check_if_tm_restore_required(tsk);
 
-#ifdef CONFIG_SMP
-	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
-		giveup_vsx(current);
-	else
-		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
-#else
-	giveup_vsx(last_task_used_vsx);
-#endif /* CONFIG_SMP */
+	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
+	if (tsk->thread.regs->msr & MSR_FP)
+		__giveup_fpu(tsk);
+	if (tsk->thread.regs->msr & MSR_VEC)
+		__giveup_altivec(tsk);
+	__giveup_vsx(tsk);
+	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
-EXPORT_SYMBOL(enable_kernel_vsx);
+EXPORT_SYMBOL(giveup_vsx);
 
-void giveup_vsx(struct task_struct *tsk)
+void enable_kernel_vsx(void)
 {
-	giveup_fpu_maybe_transactional(tsk);
-	giveup_altivec_maybe_transactional(tsk);
-	__giveup_vsx(tsk);
+	WARN_ON(preemptible());
+
+	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
+
+	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
+		check_if_tm_restore_required(current);
+		if (current->thread.regs->msr & MSR_FP)
+			__giveup_fpu(current);
+		if (current->thread.regs->msr & MSR_VEC)
+			__giveup_altivec(current);
+		__giveup_vsx(current);
+	}
 }
-EXPORT_SYMBOL(giveup_vsx);
+EXPORT_SYMBOL(enable_kernel_vsx);
 
 void flush_vsx_to_thread(struct task_struct *tsk)
 {
 	if (tsk->thread.regs) {
 		preempt_disable();
 		if (tsk->thread.regs->msr & MSR_VSX) {
-#ifdef CONFIG_SMP
 			BUG_ON(tsk != current);
-#endif
 			giveup_vsx(tsk);
 		}
 		preempt_enable();
@@ -244,19 +278,26 @@ EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_SPE
+void giveup_spe(struct task_struct *tsk)
+{
+	check_if_tm_restore_required(tsk);
+
+	msr_check_and_set(MSR_SPE);
+	__giveup_spe(tsk);
+	msr_check_and_clear(MSR_SPE);
+}
+EXPORT_SYMBOL(giveup_spe);
 
 void enable_kernel_spe(void)
 {
 	WARN_ON(preemptible());
 
-#ifdef CONFIG_SMP
-	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
-		giveup_spe(current);
-	else
-		giveup_spe(NULL);	/* just enable SPE for kernel - force */
-#else
-	giveup_spe(last_task_used_spe);
-#endif /* __SMP __ */
+	msr_check_and_set(MSR_SPE);
+
+	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
+		check_if_tm_restore_required(current);
+		__giveup_spe(current);
+	}
 }
 EXPORT_SYMBOL(enable_kernel_spe);
 
@@ -265,9 +306,7 @@ void flush_spe_to_thread(struct task_struct *tsk)
 	if (tsk->thread.regs) {
 		preempt_disable();
 		if (tsk->thread.regs->msr & MSR_SPE) {
-#ifdef CONFIG_SMP
 			BUG_ON(tsk != current);
-#endif
 			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
 			giveup_spe(tsk);
 		}
@@ -276,31 +315,81 @@ void flush_spe_to_thread(struct task_struct *tsk)
 }
 #endif /* CONFIG_SPE */
 
-#ifndef CONFIG_SMP
-/*
- * If we are doing lazy switching of CPU state (FP, altivec or SPE),
- * and the current task has some state, discard it.
- */
-void discard_lazy_cpu_state(void)
+static unsigned long msr_all_available;
+
+static int __init init_msr_all_available(void)
 {
-	preempt_disable();
-	if (last_task_used_math == current)
-		last_task_used_math = NULL;
+#ifdef CONFIG_PPC_FPU
+	msr_all_available |= MSR_FP;
+#endif
 #ifdef CONFIG_ALTIVEC
-	if (last_task_used_altivec == current)
-		last_task_used_altivec = NULL;
-#endif /* CONFIG_ALTIVEC */
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		msr_all_available |= MSR_VEC;
+#endif
 #ifdef CONFIG_VSX
-	if (last_task_used_vsx == current)
-		last_task_used_vsx = NULL;
-#endif /* CONFIG_VSX */
+	if (cpu_has_feature(CPU_FTR_VSX))
+		msr_all_available |= MSR_VSX;
+#endif
 #ifdef CONFIG_SPE
-	if (last_task_used_spe == current)
-		last_task_used_spe = NULL;
+	if (cpu_has_feature(CPU_FTR_SPE))
+		msr_all_available |= MSR_SPE;
 #endif
-	preempt_enable();
+
+	return 0;
+}
+early_initcall(init_msr_all_available);
+
+void giveup_all(struct task_struct *tsk)
+{
+	unsigned long usermsr;
+
+	if (!tsk->thread.regs)
+		return;
+
+	usermsr = tsk->thread.regs->msr;
+
+	if ((usermsr & msr_all_available) == 0)
+		return;
+
+	msr_check_and_set(msr_all_available);
+
+#ifdef CONFIG_PPC_FPU
+	if (usermsr & MSR_FP)
+		__giveup_fpu(tsk);
+#endif
+#ifdef CONFIG_ALTIVEC
+	if (usermsr & MSR_VEC)
+		__giveup_altivec(tsk);
+#endif
+#ifdef CONFIG_VSX
+	if (usermsr & MSR_VSX)
+		__giveup_vsx(tsk);
+#endif
+#ifdef CONFIG_SPE
+	if (usermsr & MSR_SPE)
+		__giveup_spe(tsk);
+#endif
+
+	msr_check_and_clear(msr_all_available);
+}
+EXPORT_SYMBOL(giveup_all);
+
+void flush_all_to_thread(struct task_struct *tsk)
+{
+	if (tsk->thread.regs) {
+		preempt_disable();
+		BUG_ON(tsk != current);
+		giveup_all(tsk);
+
+#ifdef CONFIG_SPE
+		if (tsk->thread.regs->msr & MSR_SPE)
+			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
+#endif
+
+		preempt_enable();
+	}
 }
-#endif /* CONFIG_SMP */
+EXPORT_SYMBOL(flush_all_to_thread);
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 void do_send_trap(struct pt_regs *regs, unsigned long address,
@@ -744,13 +833,15 @@ void restore_tm_state(struct pt_regs *regs)
 	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
 	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
 	if (msr_diff & MSR_FP) {
-		fp_enable();
+		msr_check_and_set(MSR_FP);
 		load_fp_state(&current->thread.fp_state);
+		msr_check_and_clear(MSR_FP);
 		regs->msr |= current->thread.fpexc_mode;
 	}
 	if (msr_diff & MSR_VEC) {
-		vec_enable();
+		msr_check_and_set(MSR_VEC);
 		load_vr_state(&current->thread.vr_state);
+		msr_check_and_clear(MSR_VEC);
 	}
 	regs->msr |= msr_diff;
 }
@@ -760,112 +851,87 @@ void restore_tm_state(struct pt_regs *regs)
 #define __switch_to_tm(prev)
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
-struct task_struct *__switch_to(struct task_struct *prev,
-	struct task_struct *new)
+static inline void save_sprs(struct thread_struct *t)
 {
-	struct thread_struct *new_thread, *old_thread;
-	struct task_struct *last;
-#ifdef CONFIG_PPC_BOOK3S_64
-	struct ppc64_tlb_batch *batch;
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(cpu_has_feature(CPU_FTR_ALTIVEC)))
+		t->vrsave = mfspr(SPRN_VRSAVE);
 #endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (cpu_has_feature(CPU_FTR_DSCR))
+		t->dscr = mfspr(SPRN_DSCR);
 
-	WARN_ON(!irqs_disabled());
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		t->bescr = mfspr(SPRN_BESCR);
+		t->ebbhr = mfspr(SPRN_EBBHR);
+		t->ebbrr = mfspr(SPRN_EBBRR);
 
-	/* Back up the TAR and DSCR across context switches.
-	 * Note that the TAR is not available for use in the kernel.  (To
-	 * provide this, the TAR should be backed up/restored on exception
-	 * entry/exit instead, and be in pt_regs.  FIXME, this should be in
-	 * pt_regs anyway (for debug).)
-	 * Save the TAR and DSCR here before we do treclaim/trecheckpoint as
-	 * these will change them.
-	 */
-	save_early_sprs(&prev->thread);
+		t->fscr = mfspr(SPRN_FSCR);
 
-	__switch_to_tm(prev);
+		/*
+		 * Note that the TAR is not available for use in the kernel.
+		 * (To provide this, the TAR should be backed up/restored on
+		 * exception entry/exit instead, and be in pt_regs.  FIXME,
+		 * this should be in pt_regs anyway (for debug).)
+		 */
+		t->tar = mfspr(SPRN_TAR);
+	}
+#endif
+}
 
-#ifdef CONFIG_SMP
-	/* avoid complexity of lazy save/restore of fpu
-	 * by just saving it every time we switch out if
-	 * this task used the fpu during the last quantum.
-	 *
-	 * If it tries to use the fpu again, it'll trap and
-	 * reload its fp regs.  So we don't have to do a restore
-	 * every switch, just a save.
-	 *  -- Cort
-	 */
-	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
-		giveup_fpu(prev);
+static inline void restore_sprs(struct thread_struct *old_thread,
+				struct thread_struct *new_thread)
+{
 #ifdef CONFIG_ALTIVEC
-	/*
-	 * If the previous thread used altivec in the last quantum
-	 * (thus changing altivec regs) then save them.
-	 * We used to check the VRSAVE register but not all apps
-	 * set it, so we don't rely on it now (and in fact we need
-	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
-	 *
-	 * On SMP we always save/restore altivec regs just to avoid the
-	 * complexity of changing processors.
-	 *  -- Cort
-	 */
-	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
-		giveup_altivec(prev);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
-	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
-		/* VMX and FPU registers are already save here */
-		__giveup_vsx(prev);
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
-	/*
-	 * If the previous thread used spe in the last quantum
-	 * (thus changing spe regs) then save them.
-	 *
-	 * On SMP we always save/restore spe regs just to avoid the
-	 * complexity of changing processors.
-	 */
-	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
-		giveup_spe(prev);
-#endif /* CONFIG_SPE */
+	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+	    old_thread->vrsave != new_thread->vrsave)
+		mtspr(SPRN_VRSAVE, new_thread->vrsave);
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (cpu_has_feature(CPU_FTR_DSCR)) {
+		u64 dscr = get_paca()->dscr_default;
+		u64 fscr = old_thread->fscr & ~FSCR_DSCR;
 
-#else  /* CONFIG_SMP */
-#ifdef CONFIG_ALTIVEC
-	/* Avoid the trap.  On smp this this never happens since
-	 * we don't set last_task_used_altivec -- Cort
-	 */
-	if (new->thread.regs && last_task_used_altivec == new)
-		new->thread.regs->msr |= MSR_VEC;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
-	if (new->thread.regs && last_task_used_vsx == new)
-		new->thread.regs->msr |= MSR_VSX;
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
-	/* Avoid the trap.  On smp this this never happens since
-	 * we don't set last_task_used_spe
-	 */
-	if (new->thread.regs && last_task_used_spe == new)
-		new->thread.regs->msr |= MSR_SPE;
-#endif /* CONFIG_SPE */
+		if (new_thread->dscr_inherit) {
+			dscr = new_thread->dscr;
+			fscr |= FSCR_DSCR;
+		}
 
-#endif /* CONFIG_SMP */
+		if (old_thread->dscr != dscr)
+			mtspr(SPRN_DSCR, dscr);
 
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
-	switch_booke_debug_regs(&new->thread.debug);
-#else
-/*
- * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
- * schedule DABR
- */
-#ifndef CONFIG_HAVE_HW_BREAKPOINT
-	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
-		__set_breakpoint(&new->thread.hw_brk);
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+		if (old_thread->fscr != fscr)
+			mtspr(SPRN_FSCR, fscr);
+	}
+
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		if (old_thread->bescr != new_thread->bescr)
+			mtspr(SPRN_BESCR, new_thread->bescr);
+		if (old_thread->ebbhr != new_thread->ebbhr)
+			mtspr(SPRN_EBBHR, new_thread->ebbhr);
+		if (old_thread->ebbrr != new_thread->ebbrr)
+			mtspr(SPRN_EBBRR, new_thread->ebbrr);
+
+		if (old_thread->tar != new_thread->tar)
+			mtspr(SPRN_TAR, new_thread->tar);
+	}
 #endif
+}
 
+struct task_struct *__switch_to(struct task_struct *prev,
+	struct task_struct *new)
+{
+	struct thread_struct *new_thread, *old_thread;
+	struct task_struct *last;
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct ppc64_tlb_batch *batch;
+#endif
 
 	new_thread = &new->thread;
 	old_thread = &current->thread;
 
+	WARN_ON(!irqs_disabled());
+
 #ifdef CONFIG_PPC64
 	/*
 	 * Collect processor utilization data per process
@@ -890,6 +956,30 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	}
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+	switch_booke_debug_regs(&new->thread.debug);
+#else
+/*
+ * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
+ * schedule DABR
+ */
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
+	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
+		__set_breakpoint(&new->thread.hw_brk);
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+#endif
+
+	/*
+	 * We need to save SPRs before treclaim/trecheckpoint as these will
+	 * change a number of them.
+	 */
+	save_sprs(&prev->thread);
+
+	__switch_to_tm(prev);
+
+	/* Save FPU, Altivec, VSX and SPE state */
+	giveup_all(prev);
+
 	/*
 	 * We can't take a PMU exception inside _switch() since there is a
 	 * window where the kernel stack SLB and the kernel stack are out
@@ -899,6 +989,15 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
 	tm_recheckpoint_new_task(new);
 
+	/*
+	 * Call restore_sprs() before calling _switch(). If we move it after
+	 * _switch() then we miss out on calling it for new tasks. The reason
+	 * for this is we manually create a stack frame for new tasks that
+	 * directly returns through ret_from_fork() or
+	 * ret_from_kernel_thread(). See copy_thread() for details.
+	 */
+	restore_sprs(old_thread, new_thread);
+
 	last = _switch(old_thread, new_thread);
 
 #ifdef CONFIG_PPC_BOOK3S_64
@@ -952,10 +1051,12 @@ static void show_instructions(struct pt_regs *regs)
 	printk("\n");
 }
 
-static struct regbit {
+struct regbit {
 	unsigned long bit;
 	const char *name;
-} msr_bits[] = {
+};
+
+static struct regbit msr_bits[] = {
 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
 	{MSR_SF,	"SF"},
 	{MSR_HV,	"HV"},
@@ -985,16 +1086,49 @@ static struct regbit {
985 {0, NULL} 1086 {0, NULL}
986}; 1087};
987 1088
988static void printbits(unsigned long val, struct regbit *bits) 1089static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
989{ 1090{
990 const char *sep = ""; 1091 const char *s = "";
991 1092
992 printk("<");
993 for (; bits->bit; ++bits) 1093 for (; bits->bit; ++bits)
994 if (val & bits->bit) { 1094 if (val & bits->bit) {
995 printk("%s%s", sep, bits->name); 1095 printk("%s%s", s, bits->name);
996 sep = ","; 1096 s = sep;
997 } 1097 }
1098}
1099
1100#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1101static struct regbit msr_tm_bits[] = {
1102 {MSR_TS_T, "T"},
1103 {MSR_TS_S, "S"},
1104 {MSR_TM, "E"},
1105 {0, NULL}
1106};
1107
1108static void print_tm_bits(unsigned long val)
1109{
1110/*
1111 * This only prints something if at least one of the TM bits is set.
1112 * Inside the TM[], the output means:
1113 * E: Enabled (bit 32)
1114 * S: Suspended (bit 33)
1115 * T: Transactional (bit 34)
1116 */
1117 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
1118 printk(",TM[");
1119 print_bits(val, msr_tm_bits, "");
1120 printk("]");
1121 }
1122}
1123#else
1124static void print_tm_bits(unsigned long val) {}
1125#endif
1126
1127static void print_msr_bits(unsigned long val)
1128{
1129 printk("<");
1130 print_bits(val, msr_bits, ",");
1131 print_tm_bits(val);
998 printk(">"); 1132 printk(">");
999} 1133}
1000 1134
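To make the new formatting concrete, here is a small self-contained user-space approximation of print_bits()/print_msr_bits(); the bit values and names in the tables are demo placeholders, only the formatting logic mirrors the code above:

#include <stdio.h>

struct regbit { unsigned long bit; const char *name; };

static void print_bits(unsigned long val, const struct regbit *bits, const char *sep)
{
	const char *s = "";

	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printf("%s%s", s, bits->name);
			s = sep;
		}
}

int main(void)
{
	/* Demo-only bit assignments; the real MSR_* values live in the kernel headers. */
	static const struct regbit msr_bits[] = {
		{0x8000, "EE"}, {0x4000, "PR"}, {0x2000, "FP"}, {0, NULL}
	};
	static const struct regbit tm_bits[] = {
		{0x4, "T"}, {0x2, "S"}, {0x1, "E"}, {0, NULL}
	};
	unsigned long msr = 0x8000 | 0x2000;
	unsigned long tm  = 0x1 | 0x2;

	printf("<");
	print_bits(msr, msr_bits, ",");
	if (tm) {
		printf(",TM[");
		print_bits(tm, tm_bits, "");
		printf("]");
	}
	printf(">\n");	/* prints: <EE,FP,TM[SE]> */
	return 0;
}
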
@@ -1019,7 +1153,7 @@ void show_regs(struct pt_regs * regs)
1019 printk("REGS: %p TRAP: %04lx %s (%s)\n", 1153 printk("REGS: %p TRAP: %04lx %s (%s)\n",
1020 regs, regs->trap, print_tainted(), init_utsname()->release); 1154 regs, regs->trap, print_tainted(), init_utsname()->release);
1021 printk("MSR: "REG" ", regs->msr); 1155 printk("MSR: "REG" ", regs->msr);
1022 printbits(regs->msr, msr_bits); 1156 print_msr_bits(regs->msr);
1023 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); 1157 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
1024 trap = TRAP(regs); 1158 trap = TRAP(regs);
1025 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) 1159 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
@@ -1061,13 +1195,10 @@ void show_regs(struct pt_regs * regs)
1061 1195
1062void exit_thread(void) 1196void exit_thread(void)
1063{ 1197{
1064 discard_lazy_cpu_state();
1065} 1198}
1066 1199
1067void flush_thread(void) 1200void flush_thread(void)
1068{ 1201{
1069 discard_lazy_cpu_state();
1070
1071#ifdef CONFIG_HAVE_HW_BREAKPOINT 1202#ifdef CONFIG_HAVE_HW_BREAKPOINT
1072 flush_ptrace_hw_breakpoint(current); 1203 flush_ptrace_hw_breakpoint(current);
1073#else /* CONFIG_HAVE_HW_BREAKPOINT */ 1204#else /* CONFIG_HAVE_HW_BREAKPOINT */
@@ -1086,10 +1217,7 @@ release_thread(struct task_struct *t)
1086 */ 1217 */
1087int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) 1218int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
1088{ 1219{
1089 flush_fp_to_thread(src); 1220 flush_all_to_thread(src);
1090 flush_altivec_to_thread(src);
1091 flush_vsx_to_thread(src);
1092 flush_spe_to_thread(src);
1093 /* 1221 /*
1094 * Flush TM state out so we can copy it. __switch_to_tm() does this 1222 * Flush TM state out so we can copy it. __switch_to_tm() does this
1095 * flush but it removes the checkpointed state from the current CPU and 1223 * flush but it removes the checkpointed state from the current CPU and
@@ -1212,7 +1340,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
1212#ifdef CONFIG_PPC64 1340#ifdef CONFIG_PPC64
1213 if (cpu_has_feature(CPU_FTR_DSCR)) { 1341 if (cpu_has_feature(CPU_FTR_DSCR)) {
1214 p->thread.dscr_inherit = current->thread.dscr_inherit; 1342 p->thread.dscr_inherit = current->thread.dscr_inherit;
1215 p->thread.dscr = current->thread.dscr; 1343 p->thread.dscr = mfspr(SPRN_DSCR);
1216 } 1344 }
1217 if (cpu_has_feature(CPU_FTR_HAS_PPR)) 1345 if (cpu_has_feature(CPU_FTR_HAS_PPR))
1218 p->thread.ppr = INIT_PPR; 1346 p->thread.ppr = INIT_PPR;
@@ -1305,7 +1433,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1305 regs->msr = MSR_USER32; 1433 regs->msr = MSR_USER32;
1306 } 1434 }
1307#endif 1435#endif
1308 discard_lazy_cpu_state();
1309#ifdef CONFIG_VSX 1436#ifdef CONFIG_VSX
1310 current->thread.used_vsr = 0; 1437 current->thread.used_vsr = 0;
1311#endif 1438#endif
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 92dea8df6b26..da5192590c44 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -389,6 +389,7 @@ static void __init prom_printf(const char *format, ...)
389 break; 389 break;
390 } 390 }
391 } 391 }
392 va_end(args);
392} 393}
393 394
394 395
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 737c0d0b53ac..30a03c03fe73 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -60,6 +60,7 @@ struct pt_regs_offset {
60#define STR(s) #s /* convert to string */ 60#define STR(s) #s /* convert to string */
61#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} 61#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
62#define GPR_OFFSET_NAME(num) \ 62#define GPR_OFFSET_NAME(num) \
63 {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
63 {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])} 64 {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
64#define REG_OFFSET_END {.name = NULL, .offset = 0} 65#define REG_OFFSET_END {.name = NULL, .offset = 0}
65 66
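The GPR_OFFSET_NAME() change above registers every GPR under both an "rN" and a "gprN" name, so either spelling resolves to the same pt_regs offset. A stand-alone sketch of the table-plus-lookup pattern this feeds (the cut-down fake_pt_regs and the lookup function are illustrative only; in the kernel the table backs helpers such as regs_query_register_offset()):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct fake_pt_regs { unsigned long gpr[32]; unsigned long nip; };

struct pt_regs_offset { const char *name; int offset; };

#define STR(s) #s
#define GPR_OFFSET_NAME(num) \
	{.name = STR(r##num),   .offset = offsetof(struct fake_pt_regs, gpr[num])}, \
	{.name = STR(gpr##num), .offset = offsetof(struct fake_pt_regs, gpr[num])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	{.name = "nip", .offset = offsetof(struct fake_pt_regs, nip)},
	{.name = NULL, .offset = 0},
};

/* Illustrative lookup: both "r1" and "gpr1" resolve to the same offset. */
static int query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -1;
}

int main(void)
{
	printf("r1=%d gpr1=%d\n", query_register_offset("r1"),
	       query_register_offset("gpr1"));
	return 0;
}
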
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 5a753fae8265..28736ff27fea 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -44,6 +44,9 @@
44#include <asm/mmu.h> 44#include <asm/mmu.h>
45#include <asm/topology.h> 45#include <asm/topology.h>
46 46
47/* This is here deliberately so it's only used in this file */
48void enter_rtas(unsigned long);
49
47struct rtas_t rtas = { 50struct rtas_t rtas = {
48 .lock = __ARCH_SPIN_LOCK_UNLOCKED 51 .lock = __ARCH_SPIN_LOCK_UNLOCKED
49}; 52};
@@ -93,21 +96,13 @@ static void unlock_rtas(unsigned long flags)
93 */ 96 */
94static void call_rtas_display_status(unsigned char c) 97static void call_rtas_display_status(unsigned char c)
95{ 98{
96 struct rtas_args *args = &rtas.args;
97 unsigned long s; 99 unsigned long s;
98 100
99 if (!rtas.base) 101 if (!rtas.base)
100 return; 102 return;
101 s = lock_rtas();
102
103 args->token = cpu_to_be32(10);
104 args->nargs = cpu_to_be32(1);
105 args->nret = cpu_to_be32(1);
106 args->rets = &(args->args[1]);
107 args->args[0] = cpu_to_be32(c);
108
109 enter_rtas(__pa(args));
110 103
104 s = lock_rtas();
105 rtas_call_unlocked(&rtas.args, 10, 1, 1, NULL, c);
111 unlock_rtas(s); 106 unlock_rtas(s);
112} 107}
113 108
@@ -418,6 +413,36 @@ static char *__fetch_rtas_last_error(char *altbuf)
418#define get_errorlog_buffer() NULL 413#define get_errorlog_buffer() NULL
419#endif 414#endif
420 415
416
417static void
418va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
419 va_list list)
420{
421 int i;
422
423 args->token = cpu_to_be32(token);
424 args->nargs = cpu_to_be32(nargs);
425 args->nret = cpu_to_be32(nret);
426 args->rets = &(args->args[nargs]);
427
428 for (i = 0; i < nargs; ++i)
429 args->args[i] = cpu_to_be32(va_arg(list, __u32));
430
431 for (i = 0; i < nret; ++i)
432 args->rets[i] = 0;
433
434 enter_rtas(__pa(args));
435}
436
437void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
438{
439 va_list list;
440
441 va_start(list, nret);
442 va_rtas_call_unlocked(args, token, nargs, nret, list);
443 va_end(list);
444}
445
421int rtas_call(int token, int nargs, int nret, int *outputs, ...) 446int rtas_call(int token, int nargs, int nret, int *outputs, ...)
422{ 447{
423 va_list list; 448 va_list list;
@@ -431,22 +456,14 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
431 return -1; 456 return -1;
432 457
433 s = lock_rtas(); 458 s = lock_rtas();
459
460 /* We use the global rtas args buffer */
434 rtas_args = &rtas.args; 461 rtas_args = &rtas.args;
435 462
436 rtas_args->token = cpu_to_be32(token);
437 rtas_args->nargs = cpu_to_be32(nargs);
438 rtas_args->nret = cpu_to_be32(nret);
439 rtas_args->rets = &(rtas_args->args[nargs]);
440 va_start(list, outputs); 463 va_start(list, outputs);
441 for (i = 0; i < nargs; ++i) 464 va_rtas_call_unlocked(rtas_args, token, nargs, nret, list);
442 rtas_args->args[i] = cpu_to_be32(va_arg(list, __u32));
443 va_end(list); 465 va_end(list);
444 466
445 for (i = 0; i < nret; ++i)
446 rtas_args->rets[i] = 0;
447
448 enter_rtas(__pa(rtas_args));
449
450 /* A -1 return code indicates that the last command couldn't 467 /* A -1 return code indicates that the last command couldn't
451 be completed due to a hardware error. */ 468 be completed due to a hardware error. */
452 if (be32_to_cpu(rtas_args->rets[0]) == -1) 469 if (be32_to_cpu(rtas_args->rets[0]) == -1)
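The refactor above moves the argument marshalling into va_rtas_call_unlocked(), so that rtas_call(), which serialises on the global lock and the shared rtas.args buffer, and the new rtas_call_unlocked(), for callers that supply their own rtas_args, share a single code path. The same va_list-forwarding shape can be shown with a small stand-alone program (toy names, no RTAS involved):

#include <stdarg.h>
#include <stdio.h>

struct toy_args { int token, nargs; int args[8]; };

/* Shared marshalling step, analogous to va_rtas_call_unlocked(). */
static void va_fill_args(struct toy_args *a, int token, int nargs, va_list list)
{
	int i;

	a->token = token;
	a->nargs = nargs;
	for (i = 0; i < nargs; i++)
		a->args[i] = va_arg(list, int);
}

/* Variant for callers that supply their own buffer, like rtas_call_unlocked(). */
static void fill_args_unlocked(struct toy_args *a, int token, int nargs, ...)
{
	va_list list;

	va_start(list, nargs);
	va_fill_args(a, token, nargs, list);
	va_end(list);
}

static struct toy_args shared;

/* Variant that owns a shared buffer, like rtas_call() with the global lock held. */
static void fill_args_locked(int token, int nargs, ...)
{
	va_list list;

	/* lock would be taken here */
	va_start(list, nargs);
	va_fill_args(&shared, token, nargs, list);
	va_end(list);
	/* lock would be released here */
}

int main(void)
{
	struct toy_args mine;

	fill_args_unlocked(&mine, 10, 2, 3, 4);
	fill_args_locked(42, 1, 7);
	printf("%d %d %d / %d %d\n", mine.token, mine.args[0], mine.args[1],
	       shared.token, shared.args[0]);
	return 0;
}
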
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index ef7c24e84a62..b6aa378aff63 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -458,7 +458,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
458 * contains valid data 458 * contains valid data
459 */ 459 */
460 if (current->thread.used_vsr && ctx_has_vsx_region) { 460 if (current->thread.used_vsr && ctx_has_vsx_region) {
461 __giveup_vsx(current); 461 flush_vsx_to_thread(current);
462 if (copy_vsx_to_user(&frame->mc_vsregs, current)) 462 if (copy_vsx_to_user(&frame->mc_vsregs, current))
463 return 1; 463 return 1;
464 msr |= MSR_VSX; 464 msr |= MSR_VSX;
@@ -606,7 +606,7 @@ static int save_tm_user_regs(struct pt_regs *regs,
606 * contains valid data 606 * contains valid data
607 */ 607 */
608 if (current->thread.used_vsr) { 608 if (current->thread.used_vsr) {
609 __giveup_vsx(current); 609 flush_vsx_to_thread(current);
610 if (copy_vsx_to_user(&frame->mc_vsregs, current)) 610 if (copy_vsx_to_user(&frame->mc_vsregs, current))
611 return 1; 611 return 1;
612 if (msr & MSR_VSX) { 612 if (msr & MSR_VSX) {
@@ -687,15 +687,6 @@ static long restore_user_regs(struct pt_regs *regs,
687 if (sig) 687 if (sig)
688 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); 688 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
689 689
690 /*
691 * Do this before updating the thread state in
692 * current->thread.fpr/vr/evr. That way, if we get preempted
693 * and another task grabs the FPU/Altivec/SPE, it won't be
694 * tempted to save the current CPU state into the thread_struct
695 * and corrupt what we are writing there.
696 */
697 discard_lazy_cpu_state();
698
699#ifdef CONFIG_ALTIVEC 690#ifdef CONFIG_ALTIVEC
700 /* 691 /*
701 * Force the process to reload the altivec registers from 692 * Force the process to reload the altivec registers from
@@ -798,15 +789,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
798 /* Restore the previous little-endian mode */ 789 /* Restore the previous little-endian mode */
799 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); 790 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
800 791
801 /*
802 * Do this before updating the thread state in
803 * current->thread.fpr/vr/evr. That way, if we get preempted
804 * and another task grabs the FPU/Altivec/SPE, it won't be
805 * tempted to save the current CPU state into the thread_struct
806 * and corrupt what we are writing there.
807 */
808 discard_lazy_cpu_state();
809
810#ifdef CONFIG_ALTIVEC 792#ifdef CONFIG_ALTIVEC
811 regs->msr &= ~MSR_VEC; 793 regs->msr &= ~MSR_VEC;
812 if (msr & MSR_VEC) { 794 if (msr & MSR_VEC) {
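Both hunks above switch from __giveup_vsx() to flush_vsx_to_thread() before copying the VSX registers into the signal frame, i.e. the live state is flushed into the thread_struct rather than merely given up. The body of flush_vsx_to_thread() is not part of this diff; as a rough sketch only, the flush_*_to_thread() helpers in process.c generally take this shape (approximation, not the exact kernel code):

	void flush_vsx_to_thread(struct task_struct *tsk)
	{
		if (tsk->thread.regs) {
			preempt_disable();
			if (tsk->thread.regs->msr & MSR_VSX)
				giveup_vsx(tsk);
			preempt_enable();
		}
	}
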
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index c676ecec0869..25520794aa37 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -147,7 +147,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
147 * VMX data. 147 * VMX data.
148 */ 148 */
149 if (current->thread.used_vsr && ctx_has_vsx_region) { 149 if (current->thread.used_vsr && ctx_has_vsx_region) {
150 __giveup_vsx(current); 150 flush_vsx_to_thread(current);
151 v_regs += ELF_NVRREG; 151 v_regs += ELF_NVRREG;
152 err |= copy_vsx_to_user(v_regs, current); 152 err |= copy_vsx_to_user(v_regs, current);
153 /* set MSR_VSX in the MSR value in the frame to 153 /* set MSR_VSX in the MSR value in the frame to
@@ -270,7 +270,7 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
270 * VMX data. 270 * VMX data.
271 */ 271 */
272 if (current->thread.used_vsr) { 272 if (current->thread.used_vsr) {
273 __giveup_vsx(current); 273 flush_vsx_to_thread(current);
274 v_regs += ELF_NVRREG; 274 v_regs += ELF_NVRREG;
275 tm_v_regs += ELF_NVRREG; 275 tm_v_regs += ELF_NVRREG;
276 276
@@ -350,15 +350,6 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
350 err |= __get_user(set->sig[0], &sc->oldmask); 350 err |= __get_user(set->sig[0], &sc->oldmask);
351 351
352 /* 352 /*
353 * Do this before updating the thread state in
354 * current->thread.fpr/vr. That way, if we get preempted
355 * and another task grabs the FPU/Altivec, it won't be
356 * tempted to save the current CPU state into the thread_struct
357 * and corrupt what we are writing there.
358 */
359 discard_lazy_cpu_state();
360
361 /*
362 * Force reload of FP/VEC. 353 * Force reload of FP/VEC.
363 * This has to be done before copying stuff into current->thread.fpr/vr 354 * This has to be done before copying stuff into current->thread.fpr/vr
364 * for the reasons explained in the previous comment. 355 * for the reasons explained in the previous comment.
@@ -469,15 +460,6 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
469 err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); 460 err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
470 461
471 /* 462 /*
472 * Do this before updating the thread state in
473 * current->thread.fpr/vr. That way, if we get preempted
474 * and another task grabs the FPU/Altivec, it won't be
475 * tempted to save the current CPU state into the thread_struct
476 * and corrupt what we are writing there.
477 */
478 discard_lazy_cpu_state();
479
480 /*
481 * Force reload of FP/VEC. 463 * Force reload of FP/VEC.
482 * This has to be done before copying stuff into current->thread.fpr/vr 464 * This has to be done before copying stuff into current->thread.fpr/vr
483 * for the reasons explained in the previous comment. 465 * for the reasons explained in the previous comment.
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index ea43a347a104..4f24606afc3f 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -61,3 +61,10 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
61 save_context_stack(trace, tsk->thread.ksp, tsk, 0); 61 save_context_stack(trace, tsk->thread.ksp, tsk, 0);
62} 62}
63EXPORT_SYMBOL_GPL(save_stack_trace_tsk); 63EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
64
65void
66save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
67{
68 save_context_stack(trace, regs->gpr[1], current, 0);
69}
70EXPORT_SYMBOL_GPL(save_stack_trace_regs);
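The new save_stack_trace_regs() walks the stack starting from GPR1 (the stack pointer) of the supplied pt_regs rather than from a task's saved ksp. A hedged sketch of an in-kernel caller (the buffer size, function name and the use of print_stack_trace() are illustrative assumptions):

/* Sketch only: capture a backtrace for the interrupted context described
 * by 'regs', e.g. from an exception handler. */
static void report_backtrace(struct pt_regs *regs)
{
	unsigned long entries[32];			/* illustrative depth */
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 0,
	};

	save_stack_trace_regs(regs, &trace);
	print_stack_trace(&trace, 0);			/* generic helper from <linux/stacktrace.h> */
}
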
diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c
index eae33e10b65f..6669b1752512 100644
--- a/arch/powerpc/kernel/swsusp.c
+++ b/arch/powerpc/kernel/swsusp.c
@@ -20,9 +20,7 @@ void save_processor_state(void)
20 * flush out all the special registers so we don't need 20 * flush out all the special registers so we don't need
21 * to save them in the snapshot 21 * to save them in the snapshot
22 */ 22 */
23 flush_fp_to_thread(current); 23 flush_all_to_thread(current);
24 flush_altivec_to_thread(current);
25 flush_spe_to_thread(current);
26 24
27#ifdef CONFIG_PPC64 25#ifdef CONFIG_PPC64
28 hard_irq_disable(); 26 hard_irq_disable();
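The call above (and the matching change in arch_dup_task_struct() earlier) folds the individual flush_*_to_thread() calls into one helper. Its effect, expressed in terms of the calls it replaces, can be sketched as follows; this describes the replaced behaviour, not necessarily the helper's actual body:

	/* Sketch: flush all register state of 'tsk' into its thread_struct. */
	void flush_all_to_thread(struct task_struct *tsk)
	{
		flush_fp_to_thread(tsk);
		flush_altivec_to_thread(tsk);
		flush_vsx_to_thread(tsk);
		flush_spe_to_thread(tsk);
	}
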
diff --git a/arch/powerpc/kernel/systbl_chk.c b/arch/powerpc/kernel/systbl_chk.c
index 2384129f5893..55323a620cfe 100644
--- a/arch/powerpc/kernel/systbl_chk.c
+++ b/arch/powerpc/kernel/systbl_chk.c
@@ -57,4 +57,4 @@
57 57
58START_TABLE 58START_TABLE
59#include <asm/systbl.h> 59#include <asm/systbl.h>
60END_TABLE __NR_syscalls 60END_TABLE NR_syscalls
diff --git a/arch/powerpc/kernel/systbl_chk.sh b/arch/powerpc/kernel/systbl_chk.sh
index 19415e7674a5..31b6e7c358ca 100644
--- a/arch/powerpc/kernel/systbl_chk.sh
+++ b/arch/powerpc/kernel/systbl_chk.sh
@@ -16,7 +16,7 @@ awk 'BEGIN { num = -1; } # Ignore the beginning of the file
16 /^START_TABLE/ { num = 0; next; } 16 /^START_TABLE/ { num = 0; next; }
17 /^END_TABLE/ { 17 /^END_TABLE/ {
18 if (num != $2) { 18 if (num != $2) {
19 printf "__NR_syscalls (%s) is not one more than the last syscall (%s)\n", 19 printf "NR_syscalls (%s) is not one more than the last syscall (%s)\n",
20 $2, num - 1; 20 $2, num - 1;
21 exit(1); 21 exit(1);
22 } 22 }
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 1be1092c7204..81b0900a39ee 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -1002,38 +1002,6 @@ static int month_days[12] = {
1002 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 1002 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
1003}; 1003};
1004 1004
1005/*
1006 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
1007 */
1008void GregorianDay(struct rtc_time * tm)
1009{
1010 int leapsToDate;
1011 int lastYear;
1012 int day;
1013 int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
1014
1015 lastYear = tm->tm_year - 1;
1016
1017 /*
1018 * Number of leap corrections to apply up to end of last year
1019 */
1020 leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
1021
1022 /*
1023 * This year is a leap year if it is divisible by 4 except when it is
1024 * divisible by 100 unless it is divisible by 400
1025 *
1026 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
1027 */
1028 day = tm->tm_mon > 2 && leapyear(tm->tm_year);
1029
1030 day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
1031 tm->tm_mday;
1032
1033 tm->tm_wday = day % 7;
1034}
1035EXPORT_SYMBOL_GPL(GregorianDay);
1036
1037void to_tm(int tim, struct rtc_time * tm) 1005void to_tm(int tim, struct rtc_time * tm)
1038{ 1006{
1039 register int i; 1007 register int i;
@@ -1064,9 +1032,9 @@ void to_tm(int tim, struct rtc_time * tm)
1064 tm->tm_mday = day + 1; 1032 tm->tm_mday = day + 1;
1065 1033
1066 /* 1034 /*
1067 * Determine the day of week 1035 * No-one uses the day of the week.
1068 */ 1036 */
1069 GregorianDay(tm); 1037 tm->tm_wday = -1;
1070} 1038}
1071EXPORT_SYMBOL(to_tm); 1039EXPORT_SYMBOL(to_tm);
1072 1040
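With GregorianDay() removed, to_tm() now reports tm_wday as -1 and any caller that still wants the weekday has to compute it itself. For reference, the deleted arithmetic is easy to reproduce in a stand-alone program (same formula as the removed code above; year is the full four-digit year, mon runs 1-12, and the result is 0 for Sunday):

#include <stdio.h>

static int leapyear(int year)
{
	return (year % 4 == 0 && year % 100 != 0) || year % 400 == 0;
}

/* Day of week, 0 = Sunday, using the same arithmetic as the removed GregorianDay(). */
static int gregorian_wday(int year, int mon, int mday)
{
	static const int month_offset[] =
		{ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
	int last_year = year - 1;
	int leaps_to_date = last_year / 4 - last_year / 100 + last_year / 400;
	int day = (mon > 2 && leapyear(year));

	day += last_year * 365 + leaps_to_date + month_offset[mon - 1] + mday;
	return day % 7;
}

int main(void)
{
	/* 1 January 2000 was a Saturday, so this prints 6. */
	printf("%d\n", gregorian_wday(2000, 1, 1));
	return 0;
}
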
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 37de90f8a845..b6becc795bb5 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1313,13 +1313,6 @@ void nonrecoverable_exception(struct pt_regs *regs)
1313 die("nonrecoverable exception", regs, SIGKILL); 1313 die("nonrecoverable exception", regs, SIGKILL);
1314} 1314}
1315 1315
1316void trace_syscall(struct pt_regs *regs)
1317{
1318 printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
1319 current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
1320 regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
1321}
1322
1323void kernel_fp_unavailable_exception(struct pt_regs *regs) 1316void kernel_fp_unavailable_exception(struct pt_regs *regs)
1324{ 1317{
1325 enum ctx_state prev_state = exception_enter(); 1318 enum ctx_state prev_state = exception_enter();
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index b457bfa28436..def1b8b5e6c1 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -671,7 +671,7 @@ static void __init vdso_setup_syscall_map(void)
671 extern unsigned long sys_ni_syscall; 671 extern unsigned long sys_ni_syscall;
672 672
673 673
674 for (i = 0; i < __NR_syscalls; i++) { 674 for (i = 0; i < NR_syscalls; i++) {
675#ifdef CONFIG_PPC64 675#ifdef CONFIG_PPC64
676 if (sys_call_table[i*2] != sys_ni_syscall) 676 if (sys_call_table[i*2] != sys_ni_syscall)
677 vdso_data->syscall_map_64[i >> 5] |= 677 vdso_data->syscall_map_64[i >> 5] |=
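The loop above packs one bit per syscall into 32-bit words of the vDSO data page, indexed by i >> 5. The continuation line with the exact bit mask is cut off in this hunk, so the most-significant-bit-first convention below is an assumption for illustration. A stand-alone sketch of building and testing such a map:

#include <stdio.h>

#define NR_FAKE_SYSCALLS 64

static unsigned int syscall_map[(NR_FAKE_SYSCALLS + 31) / 32];

/* Assumed convention: 0x80000000 >> (i & 0x1f) marks syscall i as implemented. */
static void map_set(int i)
{
	syscall_map[i >> 5] |= 0x80000000u >> (i & 0x1f);
}

static int map_test(int i)
{
	return !!(syscall_map[i >> 5] & (0x80000000u >> (i & 0x1f)));
}

int main(void)
{
	map_set(0);
	map_set(33);
	printf("%d %d %d\n", map_test(0), map_test(1), map_test(33)); /* 1 0 1 */
	return 0;
}
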
diff --git a/arch/powerpc/kernel/vdso32/datapage.S b/arch/powerpc/kernel/vdso32/datapage.S
index 59cf5f452879..3745113fcc65 100644
--- a/arch/powerpc/kernel/vdso32/datapage.S
+++ b/arch/powerpc/kernel/vdso32/datapage.S
@@ -61,7 +61,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
61 addi r3,r3,CFG_SYSCALL_MAP32 61 addi r3,r3,CFG_SYSCALL_MAP32
62 cmpli cr0,r4,0 62 cmpli cr0,r4,0
63 beqlr 63 beqlr
64 li r0,__NR_syscalls 64 li r0,NR_syscalls
65 stw r0,0(r4) 65 stw r0,0(r4)
66 crclr cr0*4+so 66 crclr cr0*4+so
67 blr 67 blr
diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S
index 2f01c4a0d8a0..184a6ba7f283 100644
--- a/arch/powerpc/kernel/vdso64/datapage.S
+++ b/arch/powerpc/kernel/vdso64/datapage.S
@@ -62,7 +62,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
62 cmpli cr0,r4,0 62 cmpli cr0,r4,0
63 crclr cr0*4+so 63 crclr cr0*4+so
64 beqlr 64 beqlr
65 li r0,__NR_syscalls 65 li r0,NR_syscalls
66 stw r0,0(r4) 66 stw r0,0(r4)
67 blr 67 blr
68 .cfi_endproc 68 .cfi_endproc
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index f5c80d567d8d..162d0f714941 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -29,24 +29,10 @@ _GLOBAL(do_load_up_transact_altivec)
29 addi r10,r3,THREAD_TRANSACT_VRSTATE 29 addi r10,r3,THREAD_TRANSACT_VRSTATE
30 REST_32VRS(0,r4,r10) 30 REST_32VRS(0,r4,r10)
31 31
32 /* Disable VEC again. */
33 MTMSRD(r6)
34 isync
35
36 blr 32 blr
37#endif 33#endif
38 34
39/* 35/*
40 * Enable use of VMX/Altivec for the caller.
41 */
42_GLOBAL(vec_enable)
43 mfmsr r3
44 oris r3,r3,MSR_VEC@h
45 MTMSRD(r3)
46 isync
47 blr
48
49/*
50 * Load state from memory into VMX registers including VSCR. 36 * Load state from memory into VMX registers including VSCR.
51 * Assumes the caller has enabled VMX in the MSR. 37 * Assumes the caller has enabled VMX in the MSR.
52 */ 38 */
@@ -84,39 +70,6 @@ _GLOBAL(load_up_altivec)
84 MTMSRD(r5) /* enable use of AltiVec now */ 70 MTMSRD(r5) /* enable use of AltiVec now */
85 isync 71 isync
86 72
87/*
88 * For SMP, we don't do lazy VMX switching because it just gets too
89 * horrendously complex, especially when a task switches from one CPU
90 * to another. Instead we call giveup_altvec in switch_to.
91 * VRSAVE isn't dealt with here, that is done in the normal context
92 * switch code. Note that we could rely on vrsave value to eventually
93 * avoid saving all of the VREGs here...
94 */
95#ifndef CONFIG_SMP
96 LOAD_REG_ADDRBASE(r3, last_task_used_altivec)
97 toreal(r3)
98 PPC_LL r4,ADDROFF(last_task_used_altivec)(r3)
99 PPC_LCMPI 0,r4,0
100 beq 1f
101
102 /* Save VMX state to last_task_used_altivec's THREAD struct */
103 toreal(r4)
104 addi r4,r4,THREAD
105 addi r6,r4,THREAD_VRSTATE
106 SAVE_32VRS(0,r5,r6)
107 mfvscr v0
108 li r10,VRSTATE_VSCR
109 stvx v0,r10,r6
110 /* Disable VMX for last_task_used_altivec */
111 PPC_LL r5,PT_REGS(r4)
112 toreal(r5)
113 PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
114 lis r10,MSR_VEC@h
115 andc r4,r4,r10
116 PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1171:
118#endif /* CONFIG_SMP */
119
120 /* Hack: if we get an altivec unavailable trap with VRSAVE 73 /* Hack: if we get an altivec unavailable trap with VRSAVE
121 * set to all zeros, we assume this is a broken application 74 * set to all zeros, we assume this is a broken application
122 * that fails to set it properly, and thus we switch it to 75 * that fails to set it properly, and thus we switch it to
@@ -145,39 +98,15 @@ _GLOBAL(load_up_altivec)
145 lvx v0,r10,r6 98 lvx v0,r10,r6
146 mtvscr v0 99 mtvscr v0
147 REST_32VRS(0,r4,r6) 100 REST_32VRS(0,r4,r6)
148#ifndef CONFIG_SMP
149 /* Update last_task_used_altivec to 'current' */
150 subi r4,r5,THREAD /* Back to 'current' */
151 fromreal(r4)
152 PPC_STL r4,ADDROFF(last_task_used_altivec)(r3)
153#endif /* CONFIG_SMP */
154 /* restore registers and return */ 101 /* restore registers and return */
155 blr 102 blr
156 103
157_GLOBAL(giveup_altivec_notask)
158 mfmsr r3
159 andis. r4,r3,MSR_VEC@h
160 bnelr /* Already enabled? */
161 oris r3,r3,MSR_VEC@h
162 SYNC
163 MTMSRD(r3) /* enable use of VMX now */
164 isync
165 blr
166
167/* 104/*
168 * giveup_altivec(tsk) 105 * __giveup_altivec(tsk)
169 * Disable VMX for the task given as the argument, 106 * Disable VMX for the task given as the argument,
170 * and save the vector registers in its thread_struct. 107 * and save the vector registers in its thread_struct.
171 * Enables the VMX for use in the kernel on return.
172 */ 108 */
173_GLOBAL(giveup_altivec) 109_GLOBAL(__giveup_altivec)
174 mfmsr r5
175 oris r5,r5,MSR_VEC@h
176 SYNC
177 MTMSRD(r5) /* enable use of VMX now */
178 isync
179 PPC_LCMPI 0,r3,0
180 beqlr /* if no previous owner, done */
181 addi r3,r3,THREAD /* want THREAD of task */ 110 addi r3,r3,THREAD /* want THREAD of task */
182 PPC_LL r7,THREAD_VRSAVEAREA(r3) 111 PPC_LL r7,THREAD_VRSAVEAREA(r3)
183 PPC_LL r5,PT_REGS(r3) 112 PPC_LL r5,PT_REGS(r3)
@@ -203,11 +132,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
203 andc r4,r4,r3 /* disable FP for previous task */ 132 andc r4,r4,r3 /* disable FP for previous task */
204 PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5) 133 PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
2051: 1341:
206#ifndef CONFIG_SMP
207 li r5,0
208 LOAD_REG_ADDRBASE(r4,last_task_used_altivec)
209 PPC_STL r5,ADDROFF(last_task_used_altivec)(r4)
210#endif /* CONFIG_SMP */
211 blr 135 blr
212 136
213#ifdef CONFIG_VSX 137#ifdef CONFIG_VSX
@@ -230,20 +154,6 @@ _GLOBAL(load_up_vsx)
230 andis. r5,r12,MSR_VEC@h 154 andis. r5,r12,MSR_VEC@h
231 beql+ load_up_altivec /* skip if already loaded */ 155 beql+ load_up_altivec /* skip if already loaded */
232 156
233#ifndef CONFIG_SMP
234 ld r3,last_task_used_vsx@got(r2)
235 ld r4,0(r3)
236 cmpdi 0,r4,0
237 beq 1f
238 /* Disable VSX for last_task_used_vsx */
239 addi r4,r4,THREAD
240 ld r5,PT_REGS(r4)
241 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
242 lis r6,MSR_VSX@h
243 andc r6,r4,r6
244 std r6,_MSR-STACK_FRAME_OVERHEAD(r5)
2451:
246#endif /* CONFIG_SMP */
247 ld r4,PACACURRENT(r13) 157 ld r4,PACACURRENT(r13)
248 addi r4,r4,THREAD /* Get THREAD */ 158 addi r4,r4,THREAD /* Get THREAD */
249 li r6,1 159 li r6,1
@@ -251,27 +161,14 @@ _GLOBAL(load_up_vsx)
251 /* enable use of VSX after return */ 161 /* enable use of VSX after return */
252 oris r12,r12,MSR_VSX@h 162 oris r12,r12,MSR_VSX@h
253 std r12,_MSR(r1) 163 std r12,_MSR(r1)
254#ifndef CONFIG_SMP
255 /* Update last_task_used_vsx to 'current' */
256 ld r4,PACACURRENT(r13)
257 std r4,0(r3)
258#endif /* CONFIG_SMP */
259 b fast_exception_return 164 b fast_exception_return
260 165
261/* 166/*
262 * __giveup_vsx(tsk) 167 * __giveup_vsx(tsk)
263 * Disable VSX for the task given as the argument. 168 * Disable VSX for the task given as the argument.
264 * Does NOT save vsx registers. 169 * Does NOT save vsx registers.
265 * Enables the VSX for use in the kernel on return.
266 */ 170 */
267_GLOBAL(__giveup_vsx) 171_GLOBAL(__giveup_vsx)
268 mfmsr r5
269 oris r5,r5,MSR_VSX@h
270 mtmsrd r5 /* enable use of VSX now */
271 isync
272
273 cmpdi 0,r3,0
274 beqlr- /* if no previous owner, done */
275 addi r3,r3,THREAD /* want THREAD of task */ 172 addi r3,r3,THREAD /* want THREAD of task */
276 ld r5,PT_REGS(r3) 173 ld r5,PT_REGS(r3)
277 cmpdi 0,r5,0 174 cmpdi 0,r5,0
@@ -281,11 +178,6 @@ _GLOBAL(__giveup_vsx)
281 andc r4,r4,r3 /* disable VSX for previous task */ 178 andc r4,r4,r3 /* disable VSX for previous task */
282 std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 179 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
2831: 1801:
284#ifndef CONFIG_SMP
285 li r5,0
286 ld r4,last_task_used_vsx@got(r2)
287 std r5,0(r4)
288#endif /* CONFIG_SMP */
289 blr 181 blr
290 182
291#endif /* CONFIG_VSX */ 183#endif /* CONFIG_VSX */