author     Kees Cook <keescook@chromium.org>        2014-04-03 20:28:11 -0400
committer  Kees Cook <keescook@chromium.org>        2014-10-16 17:38:54 -0400
commit     1e6b48116a95046ec51f3d40f83aff8b006674d7 (patch)
tree       1c18e08416613ef84513cb2cd52679e7af6d4d7c /arch/arm
parent     23a4e4050ba9c98ab67db0980a9fb20e5096d9ea (diff)
ARM: mm: allow non-text sections to be non-executable
Adds CONFIG_ARM_KERNMEM_PERMS to separate the kernel memory regions
into section-sized areas that can have different permissions. Performs
the NX permission changes during free_initmem, so that init memory can be
reclaimed.
This uses section size instead of PMD size to reduce memory lost to
padding on non-LPAE systems.
Based on work by Brad Spengler, Larry Bassel, and Laura Abbott.
Signed-off-by: Kees Cook <keescook@chromium.org>
Tested-by: Laura Abbott <lauraa@codeaurora.org>
Acked-by: Nicolas Pitre <nico@linaro.org>
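
The memory cost the commit message alludes to comes from rounding each permission boundary up to the alignment unit, which is why section size (1 MiB on non-LPAE) is preferred over PMD size (2 MiB). A small, self-contained sketch of that arithmetic is below; the three boundary addresses and the exact sizes are illustrative assumptions, not values taken from this patch.

/*
 * Padding cost of rounding region boundaries up to an alignment unit.
 * Assumes a non-LPAE ARM layout: 1 MiB sections vs. 2 MiB PMDs. The
 * example boundaries are made up; only the arithmetic is the point.
 */
#include <stdio.h>

static unsigned long align_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	const unsigned long boundaries[] = { 0xc02d4000, 0xc0a41000, 0xc0b23000 };
	const unsigned long units[] = { 1UL << 20, 1UL << 21 };
	const char *names[] = { "1 MiB section", "2 MiB PMD" };

	for (int u = 0; u < 2; u++) {
		unsigned long waste = 0;
		for (int i = 0; i < 3; i++)
			waste += align_up(boundaries[i], units[u]) - boundaries[i];
		printf("align to %-13s: %lu KiB of padding\n", names[u], waste >> 10);
	}
	return 0;
}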
Diffstat (limited to 'arch/arm')
-rw-r--r--   arch/arm/kernel/vmlinux.lds.S |  16
-rw-r--r--   arch/arm/mm/Kconfig           |   9
-rw-r--r--   arch/arm/mm/init.c            | 101
-rw-r--r--   arch/arm/mm/mmu.c             |   9
4 files changed, 133 insertions, 2 deletions
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 6f57cb94367f..18fd68a295ea 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -8,6 +8,9 @@
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/page.h>
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+#include <asm/pgtable.h>
+#endif
 
 #define PROC_INFO						\
 	. = ALIGN(4);						\
@@ -90,6 +93,11 @@ SECTIONS
 		_text = .;
 		HEAD_TEXT
 	}
+
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+	. = ALIGN(1<<SECTION_SHIFT);
+#endif
+
 	.text : {			/* Real text segment		*/
 		_stext = .;		/* Text and read-only data	*/
 			__exception_text_start = .;
@@ -145,7 +153,11 @@ SECTIONS
 	_etext = .;			/* End of text and rodata section */
 
 #ifndef CONFIG_XIP_KERNEL
+# ifdef CONFIG_ARM_KERNMEM_PERMS
+	. = ALIGN(1<<SECTION_SHIFT);
+# else
 	. = ALIGN(PAGE_SIZE);
+# endif
 	__init_begin = .;
 #endif
 	/*
@@ -220,7 +232,11 @@ SECTIONS
 	. = PAGE_OFFSET + TEXT_OFFSET;
 #else
 	__init_end = .;
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+	. = ALIGN(1<<SECTION_SHIFT);
+#else
 	. = ALIGN(THREAD_SIZE);
+#endif
 	__data_loc = .;
 #endif
 
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index ae69809a9e47..7a0756df91a2 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -1008,3 +1008,12 @@ config ARCH_SUPPORTS_BIG_ENDIAN
 	help
 	  This option specifies the architecture can support big endian
 	  operation.
+
+config ARM_KERNMEM_PERMS
+	bool "Restrict kernel memory permissions"
+	help
+	  If this is set, kernel memory other than kernel text (and rodata)
+	  will be made non-executable. The tradeoff is that each region is
+	  padded to section-size (1MiB) boundaries (because their permissions
+	  are different and splitting the 1M pages into 4K ones causes TLB
+	  performance problems), wasting memory.
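
The new option has no default line, so it stays off unless selected; trying it out is a one-line addition to a defconfig or config fragment (shown below; merging the fragment into .config is the usual Kconfig flow and not part of this patch):

CONFIG_ARM_KERNMEM_PERMS=y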
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ad82c05bfc3a..e6bfe76b2f59 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -29,6 +29,7 @@
 #include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/system_info.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
 
@@ -615,7 +616,99 @@ void __init mem_init(void)
 	}
 }
 
-void free_initmem(void)
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+struct section_perm {
+	unsigned long start;
+	unsigned long end;
+	pmdval_t mask;
+	pmdval_t prot;
+};
+
+struct section_perm nx_perms[] = {
+	/* Make pages tables, etc before _stext RW (set NX). */
+	{
+		.start	= PAGE_OFFSET,
+		.end	= (unsigned long)_stext,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+	/* Make init RW (set NX). */
+	{
+		.start	= (unsigned long)__init_begin,
+		.end	= (unsigned long)_sdata,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+};
+
+/*
+ * Updates section permissions only for the current mm (sections are
+ * copied into each mm). During startup, this is the init_mm. Is only
+ * safe to be called with preemption disabled, as under stop_machine().
+ */
+static inline void section_update(unsigned long addr, pmdval_t mask,
+				  pmdval_t prot)
+{
+	struct mm_struct *mm;
+	pmd_t *pmd;
+
+	mm = current->active_mm;
+	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
+
+#ifdef CONFIG_ARM_LPAE
+	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#else
+	if (addr & SECTION_SIZE)
+		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
+	else
+		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#endif
+	flush_pmd_entry(pmd);
+	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
+}
+
+/* Make sure extended page tables are in use. */
+static inline bool arch_has_strict_perms(void)
+{
+	if (cpu_architecture() < CPU_ARCH_ARMv6)
+		return false;
+
+	return !!(get_cr() & CR_XP);
+}
+
+#define set_section_perms(perms, field)	{				\
+	size_t i;							\
+	unsigned long addr;						\
+									\
+	if (!arch_has_strict_perms())					\
+		return;							\
+									\
+	for (i = 0; i < ARRAY_SIZE(perms); i++) {			\
+		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
+		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {		\
+			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
+				perms[i].start, perms[i].end,		\
+				SECTION_SIZE);				\
+			continue;					\
+		}							\
+									\
+		for (addr = perms[i].start;				\
+		     addr < perms[i].end;				\
+		     addr += SECTION_SIZE)				\
+			section_update(addr, perms[i].mask,		\
+				perms[i].field);			\
+	}								\
+}
+
+static inline void fix_kernmem_perms(void)
+{
+	set_section_perms(nx_perms, prot);
+}
+#else
+static inline void fix_kernmem_perms(void) { }
+#endif /* CONFIG_ARM_KERNMEM_PERMS */
+
+void free_tcmmem(void)
 {
 #ifdef CONFIG_HAVE_TCM
 	extern char __tcm_start, __tcm_end;
@@ -623,6 +716,12 @@ void free_initmem(void)
 	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
 	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
 #endif
+}
+
+void free_initmem(void)
+{
+	fix_kernmem_perms();
+	free_tcmmem();
 
 	poison_init_mem(__init_begin, __init_end - __init_begin);
 	if (!machine_is_integrator() && !machine_is_cintegrator())
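
One non-obvious detail in the init.c hunk above: set_section_perms() is a macro rather than a function because its second argument names a struct member to read, and because its early return is meant to return from the enclosing caller when extended page tables are unavailable. For the single use added here, set_section_perms(nx_perms, prot) inside fix_kernmem_perms() expands to roughly the following (a hand-expanded sketch, not code from the patch):

static inline void fix_kernmem_perms(void)
{
	size_t i;
	unsigned long addr;

	/* Without ARMv6+ extended page tables there is nothing to do. */
	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < ARRAY_SIZE(nx_perms); i++) {
		/* Only whole, section-aligned regions can be updated. */
		if (!IS_ALIGNED(nx_perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(nx_perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: section %lx-%lx not aligned to %lx\n",
			       nx_perms[i].start, nx_perms[i].end, SECTION_SIZE);
			continue;
		}

		/* Rewrite one section-level descriptor per SECTION_SIZE step. */
		for (addr = nx_perms[i].start; addr < nx_perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, nx_perms[i].mask, nx_perms[i].prot);
	}
}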
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index bdf5c94f7c36..1c52c8e94372 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1373,13 +1373,20 @@ static void __init map_lowmem(void)
 		if (start >= end)
 			break;
 
-		if (end < kernel_x_start || start >= kernel_x_end) {
+		if (end < kernel_x_start) {
 			map.pfn = __phys_to_pfn(start);
 			map.virtual = __phys_to_virt(start);
 			map.length = end - start;
 			map.type = MT_MEMORY_RWX;
 
 			create_mapping(&map);
+		} else if (start >= kernel_x_end) {
+			map.pfn = __phys_to_pfn(start);
+			map.virtual = __phys_to_virt(start);
+			map.length = end - start;
+			map.type = MT_MEMORY_RW;
+
+			create_mapping(&map);
 		} else {
 			/* This better cover the entire kernel */
 			if (start < kernel_x_start) {