author     Nicholas Piggin <npiggin@gmail.com>      2018-07-05 04:47:00 -0400
committer  Michael Ellerman <mpe@ellerman.id.au>    2018-07-15 21:37:21 -0400
commit     2bf1071a8d50928a4ae366bb3108833166c2b70c
tree       ebffef07f7ebbb9bb1ba231c4cb8ff00cc6fd795  /arch/powerpc/mm/pgtable-radix.c
parent     ce397d215ccd07b8ae3f71db689aedb85d56ab40
powerpc/64s: Remove POWER9 DD1 support
POWER9 DD1 was never a product. It is no longer supported by upstream
firmware, and it is not effectively supported in Linux due to lack of
testing.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Michael Ellerman <mpe@ellerman.id.au>
[mpe: Remove arch_make_huge_pte() entirely]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
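All of the DD1 handling removed here has the same shape: a runtime check on the
CPU_FTR_POWER9_DD1 feature bit that selects a workaround path. A minimal sketch
of that pattern, assuming a made-up function and message around the real
cpu_has_feature()/CPU_FTR_POWER9_DD1 symbols that appear in the diff below:

	/*
	 * Illustrative only: the guard pattern this patch deletes.
	 * cpu_has_feature() and CPU_FTR_POWER9_DD1 are the kernel symbols
	 * visible in the diff; the function and message are hypothetical.
	 */
	static void radix_setup_example(void)
	{
		if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
			/* DD1-only workaround path, now deleted */
			pr_warn("Applying POWER9 DD1 workaround\n");
			return;
		}

		/* normal POWER9 path, now taken unconditionally */
	}

With DD1 gone, each guard and its alternate path collapse into the normal code,
which is where the 58 deleted lines below come from.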
Diffstat (limited to 'arch/powerpc/mm/pgtable-radix.c')
-rw-r--r--  arch/powerpc/mm/pgtable-radix.c  60
1 file changed, 2 insertions(+), 58 deletions(-)
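One detail worth calling out before the diff: the radix_init_iamr() hunk below
replaces the DD1-conditional IAMR value with a hard-coded (1ul << 62). That
constant is IBM bit 1 of the 64-bit IAMR: in IBM (big-endian) bit numbering,
bit 0 is the most significant bit, so bit n of a 64-bit SPR is 1ul << (63 - n).
A short sketch using the PPC_BIT()/PPC_BITLSHIFT() helpers the kernel defines
for this conversion (written out here for a 64-bit register; the in-tree
definitions use BITS_PER_LONG):

	/* IBM bit numbering: bit 0 is the MSB of a 64-bit register. */
	#define PPC_BITLSHIFT(be)	(63 - (be))
	#define PPC_BIT(bit)		(1ul << PPC_BITLSHIFT(bit))

	/*
	 * Key 0 occupies the most significant bits of the IAMR, and the
	 * comment in radix_init_iamr() sets "bit 0 (IBM bit 1) of key0"
	 * to block instruction fetch through key 0.
	 */
	unsigned long iamr = PPC_BIT(1);	/* == 1ul << 62 */

The same helper appears in the removed update_hid_for_radix() code, where
rb = 3UL << PPC_BITLSHIFT(53) places the value 3 in a field ending at IBM
bit 53 (the /* IS = 3 */ noted in that code).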
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 96f68c5aa1f5..bba168d02235 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -226,16 +226,6 @@ void radix__mark_rodata_ro(void)
 {
 	unsigned long start, end;
 
-	/*
-	 * mark_rodata_ro() will mark itself as !writable at some point.
-	 * Due to DD1 workaround in radix__pte_update(), we'll end up with
-	 * an invalid pte and the system will crash quite severly.
-	 */
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-		pr_warn("Warning: Unable to mark rodata read only on P9 DD1\n");
-		return;
-	}
-
 	start = (unsigned long)_stext;
 	end = (unsigned long)__init_begin;
 
@@ -533,35 +523,6 @@ found:
 	return;
 }
 
-static void update_hid_for_radix(void)
-{
-	unsigned long hid0;
-	unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */
-
-	asm volatile("ptesync": : :"memory");
-	/* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
-	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
-		     : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
-	/* prs = 1, ric = 2, rs = 0, r = 1 is = 3 */
-	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
-		     : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
-	asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
-	trace_tlbie(0, 0, rb, 0, 2, 0, 1);
-	trace_tlbie(0, 0, rb, 0, 2, 1, 1);
-
-	/*
-	 * now switch the HID
-	 */
-	hid0 = mfspr(SPRN_HID0);
-	hid0 |= HID0_POWER9_RADIX;
-	mtspr(SPRN_HID0, hid0);
-	asm volatile("isync": : :"memory");
-
-	/* Wait for it to happen */
-	while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
-		cpu_relax();
-}
-
 static void radix_init_amor(void)
 {
 	/*
@@ -576,22 +537,12 @@ static void radix_init_amor(void)
 
 static void radix_init_iamr(void)
 {
-	unsigned long iamr;
-
-	/*
-	 * The IAMR should set to 0 on DD1.
-	 */
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-		iamr = 0;
-	else
-		iamr = (1ul << 62);
-
 	/*
 	 * Radix always uses key0 of the IAMR to determine if an access is
 	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
 	 * fetch.
 	 */
-	mtspr(SPRN_IAMR, iamr);
+	mtspr(SPRN_IAMR, (1ul << 62));
 }
 
 void __init radix__early_init_mmu(void)
@@ -644,8 +595,6 @@ void __init radix__early_init_mmu(void)
 
 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
 		radix_init_native();
-		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-			update_hid_for_radix();
 		lpcr = mfspr(SPRN_LPCR);
 		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
 		radix_init_partition_table();
@@ -671,10 +620,6 @@ void radix__early_init_mmu_secondary(void)
 	 * update partition table control register and UPRT
 	 */
 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
-
-		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-			update_hid_for_radix();
-
 		lpcr = mfspr(SPRN_LPCR);
 		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
 
@@ -1095,8 +1040,7 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
 	 * To avoid NMMU hang while relaxing access, we need mark
 	 * the pte invalid in between.
 	 */
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
-	    atomic_read(&mm->context.copros) > 0) {
+	if (atomic_read(&mm->context.copros) > 0) {
 		unsigned long old_pte, new_pte;
 
 		old_pte = __radix_pte_update(ptep, ~0, 0);
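
The final hunk keeps the invalidate-before-relax sequence only when the address
space has coprocessor (nest MMU) contexts attached; the DD1 erratum was the
other reason for taking that path. The kernel comment in the hunk states the
idea: to avoid a nest-MMU hang while relaxing access permissions, the PTE is
made invalid in between. A rough sketch of that ordering follows; only
__radix_pte_update(), mm->context.copros, old_pte and new_pte come from the
hunk above, while the flush helper, address, and relaxed_bits are illustrative
stand-ins:

	if (atomic_read(&mm->context.copros) > 0) {
		unsigned long old_pte, new_pte;

		/* 1. Atomically clear the PTE so the nest MMU cannot keep
		 *    using a cached copy while access is being relaxed. */
		old_pte = __radix_pte_update(ptep, ~0, 0);

		/* 2. Flush any stale translation (illustrative helper name). */
		flush_stale_translation(mm, address);

		/* 3. Reinstall the PTE with the relaxed access bits
		 *    (relaxed_bits is an illustrative stand-in). */
		new_pte = old_pte | relaxed_bits;
		__radix_pte_update(ptep, 0, new_pte);
	}

The hunk does not show the non-coprocessor path, but with the DD1 check gone,
only address spaces with coprocessors attached pay for the extra invalidate.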