-rw-r--r--	arch/powerpc/include/asm/book3s/64/pgtable.h	30
-rw-r--r--	arch/powerpc/include/asm/btext.h	4
-rw-r--r--	arch/powerpc/include/asm/kexec.h	3
-rw-r--r--	arch/powerpc/kernel/machine_kexec_32.c	4
-rw-r--r--	arch/powerpc/kernel/prom_init.c	1
-rw-r--r--	arch/powerpc/kernel/prom_init_check.sh	2
-rw-r--r--	arch/powerpc/mm/book3s64/pgtable.c	3
-rw-r--r--	arch/powerpc/mm/pgtable.c	16
8 files changed, 59 insertions, 4 deletions
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 7dede2e34b70..ccf00a8b98c6 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -876,6 +876,23 @@ static inline int pmd_present(pmd_t pmd)
 	return false;
 }
 
+static inline int pmd_is_serializing(pmd_t pmd)
+{
+	/*
+	 * If the pmd is undergoing a split, the _PAGE_PRESENT bit is clear
+	 * and _PAGE_INVALID is set (see pmd_present, pmdp_invalidate).
+	 *
+	 * This condition may also occur when flushing a pmd while flushing
+	 * it (see ptep_modify_prot_start), so callers must ensure this
+	 * case is fine as well.
+	 */
+	if ((pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) ==
+			cpu_to_be64(_PAGE_INVALID))
+		return true;
+
+	return false;
+}
+
 static inline int pmd_bad(pmd_t pmd)
 {
 	if (radix_enabled())
@@ -1092,6 +1109,19 @@ static inline int pmd_protnone(pmd_t pmd)
 #define pmd_access_permitted pmd_access_permitted
 static inline bool pmd_access_permitted(pmd_t pmd, bool write)
 {
+	/*
+	 * pmdp_invalidate sets this combination (which is not caught by
+	 * !pte_present() check in pte_access_permitted), to prevent
+	 * lock-free lookups, as part of the serialize_against_pte_lookup()
+	 * synchronisation.
+	 *
+	 * This also catches the case where the PTE's hardware PRESENT bit is
+	 * cleared while TLB is flushed, which is suboptimal but should not
+	 * be frequent.
+	 */
+	if (pmd_is_serializing(pmd))
+		return false;
+
 	return pte_access_permitted(pmd_pte(pmd), write);
 }
 
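
For context on the two hunks above: pmdp_invalidate() leaves a huge PMD with _PAGE_INVALID set and _PAGE_PRESENT clear while a THP split is in flight, and pmd_is_serializing() simply tests for that bit combination so lock-free paths can back off. A minimal userspace sketch of the predicate, using assumed bit values rather than the kernel's real definitions:

/* Sketch only: PAGE_PRESENT/PAGE_INVALID values are illustrative assumptions,
 * not the masks defined in arch/powerpc/include/asm/book3s/64/pgtable.h. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_PRESENT 0x8000000000000000ULL	/* assumed value for the sketch */
#define PAGE_INVALID 0x0000000000000040ULL	/* assumed value for the sketch */

static bool pmd_is_serializing_sketch(uint64_t pmd_val)
{
	/* Serializing state: _PAGE_INVALID set while _PAGE_PRESENT is clear. */
	return (pmd_val & (PAGE_PRESENT | PAGE_INVALID)) == PAGE_INVALID;
}

int main(void)
{
	uint64_t normal = PAGE_PRESENT;		/* ordinary mapped pmd */
	uint64_t splitting = PAGE_INVALID;	/* as left behind by pmdp_invalidate() */

	printf("normal pmd serializing?    %d\n", pmd_is_serializing_sketch(normal));
	printf("splitting pmd serializing? %d\n", pmd_is_serializing_sketch(splitting));
	return 0;
}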
diff --git a/arch/powerpc/include/asm/btext.h b/arch/powerpc/include/asm/btext.h
index 3ffad030393c..461b0f193864 100644
--- a/arch/powerpc/include/asm/btext.h
+++ b/arch/powerpc/include/asm/btext.h
@@ -13,7 +13,11 @@ extern void btext_update_display(unsigned long phys, int width, int height,
 			       int depth, int pitch);
 extern void btext_setup_display(int width, int height, int depth, int pitch,
 				unsigned long address);
+#ifdef CONFIG_PPC32
 extern void btext_prepare_BAT(void);
+#else
+static inline void btext_prepare_BAT(void) { }
+#endif
 extern void btext_map(void);
 extern void btext_unmap(void);
 
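
The btext.h change uses the usual stub-in-header pattern: PPC32 keeps the real declaration, every other configuration gets an empty static inline, so call sites such as prom_init.c below need no #ifdef. A hypothetical, self-contained illustration of the same pattern (CONFIG_FEATURE_FOO and feature_prepare() are made-up names, not kernel symbols):

#include <stdio.h>

#ifdef CONFIG_FEATURE_FOO
void feature_prepare(void);			/* real implementation provided elsewhere */
#else
static inline void feature_prepare(void) { }	/* no-op when the option is off */
#endif

int main(void)
{
	/* The call site stays free of #ifdef clutter either way. */
	feature_prepare();
	printf("done\n");
	return 0;
}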
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 4a585cba1787..c68476818753 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -94,6 +94,9 @@ static inline bool kdump_in_progress(void)
 	return crashing_cpu >= 0;
 }
 
+void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer,
+			 unsigned long start_address) __noreturn;
+
 #ifdef CONFIG_KEXEC_FILE
 extern const struct kexec_file_ops kexec_elf64_ops;
 
diff --git a/arch/powerpc/kernel/machine_kexec_32.c b/arch/powerpc/kernel/machine_kexec_32.c
index affe5dcce7f4..2b160d68db49 100644
--- a/arch/powerpc/kernel/machine_kexec_32.c
+++ b/arch/powerpc/kernel/machine_kexec_32.c
@@ -30,7 +30,6 @@ typedef void (*relocate_new_kernel_t)(
  */
 void default_machine_kexec(struct kimage *image)
 {
-	extern const unsigned char relocate_new_kernel[];
 	extern const unsigned int relocate_new_kernel_size;
 	unsigned long page_list;
 	unsigned long reboot_code_buffer, reboot_code_buffer_phys;
@@ -58,6 +57,9 @@ void default_machine_kexec(struct kimage *image)
 			   reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
 	printk(KERN_INFO "Bye!\n");
 
+	if (!IS_ENABLED(CONFIG_FSL_BOOKE) && !IS_ENABLED(CONFIG_44x))
+		relocate_new_kernel(page_list, reboot_code_buffer_phys, image->start);
+
 	/* now call it */
 	rnk = (relocate_new_kernel_t) reboot_code_buffer;
 	(*rnk)(page_list, reboot_code_buffer_phys, image->start);
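
The IS_ENABLED() guard above is the usual alternative to #ifdef: the guarded call is parsed and type-checked on every configuration (here against the prototype added to kexec.h), and the branch is discarded as dead code when the Kconfig symbols say so. A userspace-compilable imitation of the idiom, where IS_ENABLED_SKETCH and CONFIG_HYPOTHETICAL_OPT are stand-ins rather than the kernel macros:

#include <stdio.h>

/* Stand-in for the kernel's IS_ENABLED(); here it just tests a plain macro. */
#ifdef CONFIG_HYPOTHETICAL_OPT
#define IS_ENABLED_SKETCH 1
#else
#define IS_ENABLED_SKETCH 0
#endif

static void do_optional_work(void)
{
	printf("optional path taken\n");
}

int main(void)
{
	/*
	 * Unlike an #ifdef, the call below is always compiled and type-checked;
	 * the compiler simply drops it as dead code when the option is off.
	 */
	if (IS_ENABLED_SKETCH)
		do_optional_work();

	printf("common path\n");
	return 0;
}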
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 3555cad7bdde..ed446b7ea164 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2336,6 +2336,7 @@ static void __init prom_check_displays(void)
 			prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
 				    width, height, pitch, addr);
 			btext_setup_display(width, height, 8, pitch, addr);
+			btext_prepare_BAT();
 		}
 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
 	}
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index 518d416971c1..160bef0d553d 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -24,7 +24,7 @@ fi
 WHITELIST="add_reloc_offset __bss_start __bss_stop copy_and_flush
 _end enter_prom $MEM_FUNCS reloc_offset __secondary_hold
 __secondary_hold_acknowledge __secondary_hold_spinloop __start
-logo_linux_clut224
+logo_linux_clut224 btext_prepare_BAT
 reloc_got2 kernstart_addr memstart_addr linux_banner _stext
 __prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."
 
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index a255707e4aee..01bc9663360d 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -112,6 +112,9 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 	/*
 	 * This ensures that generic code that rely on IRQ disabling
 	 * to prevent a parallel THP split work as expected.
+	 *
+	 * Marking the entry with _PAGE_INVALID && ~_PAGE_PRESENT requires
+	 * a special case check in pmd_access_permitted.
 	 */
 	serialize_against_pte_lookup(vma->vm_mm);
 	return __pmd(old_pmd);
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 39d2f8012386..fc10c0c24f51 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -368,13 +368,25 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 		pdshift = PMD_SHIFT;
 		pmdp = pmd_offset(&pud, ea);
 		pmd = READ_ONCE(*pmdp);
+
 		/*
-		 * A hugepage collapse is captured by pmd_none, because
-		 * it mark the pmd none and do a hpte invalidate.
+		 * A hugepage collapse is captured by this condition, see
+		 * pmdp_collapse_flush.
 		 */
 		if (pmd_none(pmd))
 			return NULL;
 
+#ifdef CONFIG_PPC_BOOK3S_64
+		/*
+		 * A hugepage split is captured by this condition, see
+		 * pmdp_invalidate.
+		 *
+		 * Huge page modification can be caught here too.
+		 */
+		if (pmd_is_serializing(pmd))
+			return NULL;
+#endif
+
 		if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
 			if (is_thp)
 				*is_thp = true;
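
Finally, the __find_linux_pte() change means lock-free walkers now also get NULL back while a PMD is in the serializing (split-in-progress) state. A hedged sketch of a hypothetical caller, following the existing convention that such walks run with interrupts disabled so the IPI broadcast in serialize_against_pte_lookup() cannot complete concurrently; lookup_ea_sketch() is not a real kernel function:

/* Hypothetical caller sketch, not part of the patch. */
#include <linux/errno.h>
#include <linux/irqflags.h>
#include <linux/mm.h>
#include <asm/pte-walk.h>

static int lookup_ea_sketch(pgd_t *pgdir, unsigned long ea)
{
	unsigned long flags;
	unsigned int hshift;
	bool is_thp;
	pte_t *ptep;

	local_irq_save(flags);	/* holds off serialize_against_pte_lookup() */
	ptep = __find_linux_pte(pgdir, ea, &is_thp, &hshift);
	if (!ptep) {
		/* pmd none, collapsing, or now also splitting: back off and retry */
		local_irq_restore(flags);
		return -EAGAIN;
	}

	/* ... *ptep may be read while interrupts remain disabled ... */

	local_irq_restore(flags);
	return 0;
}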