path: root/arch/powerpc/mm
author    Christophe Leroy <christophe.leroy@c-s.fr>	2019-08-21 06:20:00 -0400
committer Michael Ellerman <mpe@ellerman.id.au>	2019-09-20 18:36:53 -0400
commit    4c0f5d1eb4072871c34530358df45f05ab80edd6 (patch)
tree      db91d2da1a31b18e6da69dd089aa660c2686c7ef /arch/powerpc/mm
parent    13c7bb3c57dcfe779ea5b4b083f6c47753cc5327 (diff)
powerpc/mm: Add a helper to select PAGE_KERNEL_RO or PAGE_READONLY
In a couple of places there is a need to select whether read-only protection of shadow pages is performed with PAGE_KERNEL_RO or with PAGE_READONLY. Add a helper to avoid duplicating the choice.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: stable@vger.kernel.org
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/9f33f44b9cd741c4a02b3dce7b8ef9438fe2cd2a.1566382750.git.christophe.leroy@c-s.fr
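The change boils down to one small helper plus simpler call sites; a condensed sketch (all identifiers taken from the diff below) reads:

	static pgprot_t kasan_prot_ro(void)
	{
		/* hash MMU (HPTE) present: use PAGE_READONLY, otherwise PAGE_KERNEL_RO */
		if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
			return PAGE_READONLY;

		return PAGE_KERNEL_RO;
	}

	/* call sites then pick the protection once, e.g.: */
	pgprot_t prot = kasan_prot_ro();

	kasan_populate_pte(kasan_early_shadow_pte, prot);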
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/kasan/kasan_init_32.c | 21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index 802387b231ad..e8ab3cc5f6e4 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -12,6 +12,14 @@
 #include <asm/code-patching.h>
 #include <mm/mmu_decl.h>
 
+static pgprot_t kasan_prot_ro(void)
+{
+	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
+		return PAGE_READONLY;
+
+	return PAGE_KERNEL_RO;
+}
+
 static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
 {
 	unsigned long va = (unsigned long)kasan_early_shadow_page;
@@ -26,6 +34,7 @@ static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned l
 {
 	pmd_t *pmd;
 	unsigned long k_cur, k_next;
+	pgprot_t prot = kasan_prot_ro();
 
 	pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);
 
@@ -43,10 +52,7 @@ static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned l
 
 		if (!new)
 			return -ENOMEM;
-		if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
-			kasan_populate_pte(new, PAGE_READONLY);
-		else
-			kasan_populate_pte(new, PAGE_KERNEL_RO);
+		kasan_populate_pte(new, prot);
 
 		smp_wmb(); /* See comment in __pte_alloc */
 
@@ -103,10 +109,9 @@ static int __ref kasan_init_region(void *start, size_t size)
 
 static void __init kasan_remap_early_shadow_ro(void)
 {
-	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
-		kasan_populate_pte(kasan_early_shadow_pte, PAGE_READONLY);
-	else
-		kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL_RO);
+	pgprot_t prot = kasan_prot_ro();
+
+	kasan_populate_pte(kasan_early_shadow_pte, prot);
 
 	flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
 }