Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/align.c          | 27
-rw-r--r--  arch/powerpc/kernel/entry_64.S       |  6
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S |  2
-rw-r--r--  arch/powerpc/kernel/idle_book3s.S    | 20
-rw-r--r--  arch/powerpc/kernel/misc_64.S        |  4
-rw-r--r--  arch/powerpc/kernel/setup_64.c       |  9
6 files changed, 51 insertions, 17 deletions
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index cbc7c42cdb74..ec7a8b099dd9 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -807,14 +807,25 @@ int fix_alignment(struct pt_regs *regs)
 	nb = aligninfo[instr].len;
 	flags = aligninfo[instr].flags;
 
-	/* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
-	if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
-		nb = 8;
-		flags = LD+SW;
-	} else if (IS_XFORM(instruction) &&
-		   ((instruction >> 1) & 0x3ff) == 660) {
-		nb = 8;
-		flags = ST+SW;
+	/*
+	 * Handle some cases which give overlaps in the DSISR values.
+	 */
+	if (IS_XFORM(instruction)) {
+		switch (get_xop(instruction)) {
+		case 532:	/* ldbrx */
+			nb = 8;
+			flags = LD+SW;
+			break;
+		case 660:	/* stdbrx */
+			nb = 8;
+			flags = ST+SW;
+			break;
+		case 20: /* lwarx */
+		case 84: /* ldarx */
+		case 116: /* lharx */
+		case 276: /* lqarx */
+			return 0; /* not emulated ever */
+		}
 	}
 
 	/* Byteswap little endian loads and stores */
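
The new switch keys off the X-form extended opcode. A minimal userspace sketch of that decode, assuming get_xop() amounts to the same ((instruction >> 1) & 0x3ff) extraction the replaced inline test used; the instruction word is built by hand purely to exercise the 532 (ldbrx) case:

#include <stdio.h>
#include <stdint.h>

static unsigned int xop(uint32_t instr)
{
	/* Same extraction the replaced inline test used. */
	return (instr >> 1) & 0x3ff;
}

int main(void)
{
	/* Primary opcode 31 with extended opcode 532; register fields left 0. */
	uint32_t instr = (31u << 26) | (532u << 1);

	switch (xop(instr)) {
	case 532:	/* ldbrx */
	case 660:	/* stdbrx */
		printf("byte-reversed access: emulate with nb = 8\n");
		break;
	case 20:	/* lwarx */
	case 84:	/* ldarx */
	case 116:	/* lharx */
	case 276:	/* lqarx */
		printf("load-with-reservation: never emulated\n");
		break;
	default:
		printf("xop %u: table-driven handling\n", xop(instr));
	}
	return 0;
}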
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 6432d4bf08c8..767ef6d68c9e 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -689,7 +689,7 @@ resume_kernel:
 
 	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
 
-	lwz	r3,GPR1(r1)
+	ld	r3,GPR1(r1)
 	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
 	mr	r4,r1			/* src: current exception frame */
 	mr	r1,r3			/* Reroute the trampoline frame to r1 */
@@ -703,8 +703,8 @@ resume_kernel:
 	addi	r6,r6,8
 	bdnz	2b
 
-	/* Do real store operation to complete stwu */
-	lwz	r5,GPR1(r1)
+	/* Do real store operation to complete stdu */
+	ld	r5,GPR1(r1)
 	std	r8,0(r5)
 
 	/* Clear _TIF_EMULATE_STACK_STORE flag */
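
Both hunks above replace lwz with ld when reloading the saved r1 out of the exception frame: GPR1 holds a 64-bit kernel stack pointer, and a 4-byte load hands back only half of it. A small self-contained C sketch of the difference (the pointer value below is made up for illustration):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint64_t saved_gpr1 = 0xc000000012345670ULL;	/* hypothetical 64-bit SP */
	uint32_t half;

	/*
	 * An lwz-sized load reads only 4 of the 8 bytes at that slot;
	 * which half it sees depends on endianness, but either way the
	 * result is not a usable stack pointer.
	 */
	memcpy(&half, &saved_gpr1, sizeof(half));

	printf("ld  sees 0x%016llx\n", (unsigned long long)saved_gpr1);
	printf("lwz sees 0x%08x\n", half);
	return 0;
}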
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 857bf7c5b946..6353019966e6 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -982,7 +982,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early)
 	EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
 	EXCEPTION_PROLOG_COMMON_3(0xe60)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	BRANCH_LINK_TO_FAR(r4, hmi_exception_realmode)
+	BRANCH_LINK_TO_FAR(hmi_exception_realmode) /* Function call ABI */
 	/* Windup the stack. */
 	/* Move original HSRR0 and HSRR1 into the respective regs */
 	ld	r9,_MSR(r1)
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 995728736677..6fd08219248d 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -449,9 +449,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 _GLOBAL(pnv_wakeup_tb_loss)
 	ld	r1,PACAR1(r13)
 	/*
-	 * Before entering any idle state, the NVGPRs are saved in the stack
-	 * and they are restored before switching to the process context. Hence
-	 * until they are restored, they are free to be used.
+	 * Before entering any idle state, the NVGPRs are saved in the stack.
+	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
+	 * NVGPRs are restored. If we are here, it is likely that state is lost,
+	 * but not guaranteed -- neither ISA207 nor ISA300 tests to reach
+	 * here are the same as the test to restore NVGPRS:
+	 * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
+	 * and SRR1 test for restoring NVGPRs.
+	 *
+	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
+	 * guarantee they will always be restored. This might be tightened
+	 * with careful reading of specs (particularly for ISA300) but this
+	 * is already a slow wakeup path and it's simpler to be safe.
+	 */
+	li	r0,1
+	stb	r0,PACA_NAPSTATELOST(r13)
+
+	/*
 	 *
 	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
 	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
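
The two new instructions simply force PACA_NAPSTATELOST before any NVGPR can be clobbered. A compilable sketch of that invariant, using hypothetical C stand-ins for the PACA field and the restore decision (the real logic is the assembly above, not this):

#include <stdio.h>

/* Hypothetical stand-in for the per-CPU PACA field used above. */
struct paca_sketch {
	int nap_state_lost;	/* PACA_NAPSTATELOST */
};

/*
 * Equivalent of "li r0,1 ; stb r0,PACA_NAPSTATELOST(r13)": once this
 * path may clobber non-volatile GPRs, the later restore must happen
 * regardless of what the state-loss tests would have concluded.
 */
static void wakeup_tb_loss(struct paca_sketch *paca)
{
	paca->nap_state_lost = 1;
	/* ... SRR1/LR saved, opal_call() may run and clobber NVGPRs ... */
}

int main(void)
{
	struct paca_sketch paca = { 0 };

	wakeup_tb_loss(&paca);
	if (paca.nap_state_lost)
		printf("restore NVGPRs from the pre-idle stack frame\n");
	return 0;
}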
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index ae179cb1bb3c..c119044cad0d 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -67,7 +67,7 @@ PPC64_CACHES:
  * flush all bytes from start through stop-1 inclusive
  */
 
-_GLOBAL(flush_icache_range)
+_GLOBAL_TOC(flush_icache_range)
 BEGIN_FTR_SECTION
 	PURGE_PREFETCHED_INS
 	blr
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(flush_icache_range)
  *
  * flush all bytes from start to stop-1 inclusive
  */
-_GLOBAL(flush_dcache_range)
+_GLOBAL_TOC(flush_dcache_range)
 
 /*
  * Flush the data cache to memory
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 9cfaa8b69b5f..f997154dfc41 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -236,6 +236,15 @@ static void cpu_ready_for_interrupts(void)
 		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
 	}
 
+	/*
+	 * Fixup HFSCR:TM based on CPU features. The bit is set by our
+	 * early asm init because at that point we haven't updated our
+	 * CPU features from firmware and device-tree. Here we have,
+	 * so let's do it.
+	 */
+	if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
+		mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
+
 	/* Set IR and DR in PACA MSR */
 	get_paca()->kernel_msr = MSR_KERNEL;
 }
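
The HFSCR fixup added above is a plain read-modify-write of a hypervisor SPR: read the register, clear the TM facility bit when the updated CPU features say TM is not available, write the value back. A userspace sketch of the same idiom; the mask value and the feature flag below are stand-ins, not the kernel's definitions:

#include <stdio.h>
#include <stdint.h>

#define HFSCR_TM_SKETCH	(1ULL << 4)	/* stand-in mask, not the real HFSCR_TM */

int main(void)
{
	uint64_t hfscr = HFSCR_TM_SKETCH | 0x3;	/* pretend mfspr(SPRN_HFSCR) result */
	int tm_supported = 0;			/* pretend CPU_FTR_TM_COMP is absent */

	if (!tm_supported)
		hfscr &= ~HFSCR_TM_SKETCH;	/* pretend mtspr(SPRN_HFSCR, ...) */

	printf("hfscr = 0x%llx\n", (unsigned long long)hfscr);
	return 0;
}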