Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/crypto/crc32c-vpmsum_glue.c   |  3
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h   |  8
-rw-r--r--  arch/powerpc/kernel/align.c                | 27
-rw-r--r--  arch/powerpc/kernel/entry_64.S             |  6
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S       |  2
-rw-r--r--  arch/powerpc/kernel/idle_book3s.S          | 20
-rw-r--r--  arch/powerpc/kernel/misc_64.S              |  4
-rw-r--r--  arch/powerpc/kernel/setup_64.c             |  9
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c        |  4
-rw-r--r--  arch/powerpc/mm/hash_native_64.c           |  7
-rw-r--r--  arch/powerpc/mm/init_64.c                  |  3
11 files changed, 68 insertions, 25 deletions
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index 411994551afc..f058e0c3e4d4 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -33,10 +33,13 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
 	}
 
 	if (len & ~VMX_ALIGN_MASK) {
+		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
 		crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
+		disable_kernel_altivec();
 		pagefault_enable();
+		preempt_enable();
 	}
 
 	tail = len & VMX_ALIGN_MASK;
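
The hunk above brackets the VMX section with preempt_disable()/preempt_enable() in addition to the existing pagefault_disable()/pagefault_enable() pair, and adds the matching disable_kernel_altivec(). A minimal C sketch of the resulting ordering, using the same kernel helpers as the hunk; the wrapper name crc32c_vpmsum_sketch is illustrative only and not part of the commit:

	static u32 crc32c_vpmsum_sketch(u32 crc, const unsigned char *p, size_t len)
	{
		if (len & ~VMX_ALIGN_MASK) {
			preempt_disable();		/* stay on this CPU while VMX state is live */
			pagefault_disable();		/* no page faults may be taken in this window */
			enable_kernel_altivec();	/* claim the Altivec/VMX unit for the kernel */
			crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
			disable_kernel_altivec();	/* release the unit before re-enabling faults */
			pagefault_enable();
			preempt_enable();
		}
		return crc;
	}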
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 14752eee3d0c..ed3beadd2cc5 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -236,9 +236,9 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 	mtctr reg;							\
 	bctr
 
-#define BRANCH_LINK_TO_FAR(reg, label)					\
-	__LOAD_FAR_HANDLER(reg, label);					\
-	mtctr reg;							\
+#define BRANCH_LINK_TO_FAR(label)					\
+	__LOAD_FAR_HANDLER(r12, label);					\
+	mtctr r12;							\
 	bctrl
 
 /*
@@ -265,7 +265,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 #define BRANCH_TO_COMMON(reg, label)					\
 	b label
 
-#define BRANCH_LINK_TO_FAR(reg, label)					\
+#define BRANCH_LINK_TO_FAR(label)					\
 	bl label
 
 #define BRANCH_TO_KVM(reg, label)					\
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index cbc7c42cdb74..ec7a8b099dd9 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -807,14 +807,25 @@ int fix_alignment(struct pt_regs *regs)
 	nb = aligninfo[instr].len;
 	flags = aligninfo[instr].flags;
 
-	/* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
-	if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
-		nb = 8;
-		flags = LD+SW;
-	} else if (IS_XFORM(instruction) &&
-		   ((instruction >> 1) & 0x3ff) == 660) {
-		nb = 8;
-		flags = ST+SW;
+	/*
+	 * Handle some cases which give overlaps in the DSISR values.
+	 */
+	if (IS_XFORM(instruction)) {
+		switch (get_xop(instruction)) {
+		case 532:	/* ldbrx */
+			nb = 8;
+			flags = LD+SW;
+			break;
+		case 660:	/* stdbrx */
+			nb = 8;
+			flags = ST+SW;
+			break;
+		case 20: /* lwarx */
+		case 84: /* ldarx */
+		case 116: /* lharx */
+		case 276: /* lqarx */
+			return 0; /* not emulated ever */
+		}
 	}
 
 	/* Byteswap little endian loads and stores */
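
The switch above keys on the X-form extended opcode. The removed code computed it inline as (instruction >> 1) & 0x3ff, and get_xop() is assumed to reduce to the same extraction. A hedged sketch of that extraction and of the load-and-reserve bail-out; GET_XOP_SKETCH and is_larx_sketch are illustrative names, not part of the commit:

	/* X-form minor opcode lives in bits 1..10 of the instruction word. */
	#define GET_XOP_SKETCH(inst)	(((inst) >> 1) & 0x3ff)

	static int is_larx_sketch(unsigned int instruction)
	{
		switch (GET_XOP_SKETCH(instruction)) {
		case 20:	/* lwarx */
		case 84:	/* ldarx */
		case 116:	/* lharx */
		case 276:	/* lqarx */
			return 1;	/* load-and-reserve: alignment is never emulated */
		default:
			return 0;
		}
	}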
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 6432d4bf08c8..767ef6d68c9e 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -689,7 +689,7 @@ resume_kernel:
 
 	addi r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
 
-	lwz r3,GPR1(r1)
+	ld r3,GPR1(r1)
 	subi r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
 	mr r4,r1			/* src: current exception frame */
 	mr r1,r3			/* Reroute the trampoline frame to r1 */
@@ -703,8 +703,8 @@ resume_kernel:
 	addi r6,r6,8
 	bdnz 2b
 
-	/* Do real store operation to complete stwu */
-	lwz r5,GPR1(r1)
+	/* Do real store operation to complete stdu */
+	ld r5,GPR1(r1)
 	std r8,0(r5)
 
 	/* Clear _TIF_EMULATE_STACK_STORE flag */
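
Both hunks above replace a 32-bit lwz with a 64-bit ld when re-reading the saved GPR1 (stack pointer) slot. A hedged user-space C analogue of the difference; gpr1_slot is a hypothetical stand-in for the GPR1 save area, not an identifier from the kernel:

	#include <stdint.h>

	/* What "ld" does: the full 64-bit saved stack pointer is recovered. */
	static uint64_t load_gpr1_ld(const uint64_t *gpr1_slot)
	{
		return *gpr1_slot;
	}

	/* What "lwz" did: only 32 of the 64 bits are read back, so the
	 * trampoline frame could be rebuilt from a truncated stack pointer. */
	static uint64_t load_gpr1_lwz(const uint64_t *gpr1_slot)
	{
		return *(const uint32_t *)gpr1_slot;
	}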
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 857bf7c5b946..6353019966e6 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -982,7 +982,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early)
 	EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
 	EXCEPTION_PROLOG_COMMON_3(0xe60)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	BRANCH_LINK_TO_FAR(r4, hmi_exception_realmode)
+	BRANCH_LINK_TO_FAR(hmi_exception_realmode) /* Function call ABI */
 	/* Windup the stack. */
 	/* Move original HSRR0 and HSRR1 into the respective regs */
 	ld	r9,_MSR(r1)
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 995728736677..6fd08219248d 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -449,9 +449,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 _GLOBAL(pnv_wakeup_tb_loss)
 	ld	r1,PACAR1(r13)
 	/*
-	 * Before entering any idle state, the NVGPRs are saved in the stack
-	 * and they are restored before switching to the process context. Hence
-	 * until they are restored, they are free to be used.
+	 * Before entering any idle state, the NVGPRs are saved in the stack.
+	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
+	 * NVGPRs are restored. If we are here, it is likely that state is lost,
+	 * but not guaranteed -- neither ISA207 nor ISA300 tests to reach
+	 * here are the same as the test to restore NVGPRS:
+	 * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
+	 * and SRR1 test for restoring NVGPRs.
+	 *
+	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
+	 * guarantee they will always be restored. This might be tightened
+	 * with careful reading of specs (particularly for ISA300) but this
+	 * is already a slow wakeup path and it's simpler to be safe.
+	 */
+	li	r0,1
+	stb	r0,PACA_NAPSTATELOST(r13)
+
+	/*
 	 *
 	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
 	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index ae179cb1bb3c..c119044cad0d 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -67,7 +67,7 @@ PPC64_CACHES:
  *   flush all bytes from start through stop-1 inclusive
  */
 
-_GLOBAL(flush_icache_range)
+_GLOBAL_TOC(flush_icache_range)
 BEGIN_FTR_SECTION
 	PURGE_PREFETCHED_INS
 	blr
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(flush_icache_range)
  *
  * flush all bytes from start to stop-1 inclusive
  */
-_GLOBAL(flush_dcache_range)
+_GLOBAL_TOC(flush_dcache_range)
 
 /*
  * Flush the data cache to memory
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 9cfaa8b69b5f..f997154dfc41 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -236,6 +236,15 @@ static void cpu_ready_for_interrupts(void)
 		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
 	}
 
+	/*
+	 * Fixup HFSCR:TM based on CPU features. The bit is set by our
+	 * early asm init because at that point we haven't updated our
+	 * CPU features from firmware and device-tree. Here we have,
+	 * so let's do it.
+	 */
+	if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
+		mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
+
 	/* Set IR and DR in PACA MSR */
 	get_paca()->kernel_msr = MSR_KERNEL;
 }
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 8c68145ba1bd..710e491206ed 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1487,6 +1487,10 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
 	/* start new resize */
 
 	resize = kzalloc(sizeof(*resize), GFP_KERNEL);
+	if (!resize) {
+		ret = -ENOMEM;
+		goto out;
+	}
 	resize->order = shift;
 	resize->kvm = kvm;
 	INIT_WORK(&resize->work, resize_hpt_prepare_work);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index cc332608e656..65bb8f33b399 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -638,6 +638,10 @@ static void native_flush_hash_range(unsigned long number, int local)
 	unsigned long psize = batch->psize;
 	int ssize = batch->ssize;
 	int i;
+	unsigned int use_local;
+
+	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
+		    mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
 
 	local_irq_save(flags);
 
@@ -667,8 +671,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 		} pte_iterate_hashed_end();
 	}
 
-	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
-	    mmu_psize_defs[psize].tlbiel && local) {
+	if (use_local) {
 		asm volatile("ptesync":::"memory");
 		for (i = 0; i < number; i++) {
 			vpn = batch->vpn[i];
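
The two hunks above hoist the "can this flush use local tlbiel" decision into use_local, computed once before any PTEs are invalidated and reused at flush time, and extend it with a cxl_ctx_in_use() check, since cxl (CAPI) hardware can hold translations that a CPU-local tlbiel does not invalidate. A minimal sketch of the hoisted predicate, using the helpers shown in the hunks; the function name can_use_local_sketch is illustrative only, not part of the commit:

	static unsigned int can_use_local_sketch(int local, unsigned long psize)
	{
		/* Local tlbiel is only safe when the caller asked for a local
		 * flush, the CPU supports tlbiel for this page size, and no
		 * cxl context is active. */
		return local && mmu_has_feature(MMU_FTR_TLBIEL) &&
		       mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
	}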
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 9be992083d2a..c22f207aa656 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -397,8 +397,7 @@ static void early_check_vec5(void)
 void __init mmu_early_init_devtree(void)
 {
 	/* Disable radix mode based on kernel command line. */
-	/* We don't yet have the machinery to do radix as a guest. */
-	if (disable_radix || !(mfmsr() & MSR_HV))
+	if (disable_radix)
 		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
 
 	/*