author		Laura Abbott <lauraa@codeaurora.org>	2014-08-19 15:41:43 -0400
committer	Will Deacon <will.deacon@arm.com>	2014-09-08 09:39:18 -0400
commit		11d91a770f1fff44dafdf88d6089a3451f99c9b6 (patch)
tree		db7874516b61d7df4d85e4529b9ea6e1033624e6
parent		b6d4f2800b7bad654caf00654f4bff21594ef838 (diff)
arm64: Add CONFIG_DEBUG_SET_MODULE_RONX support
In a similar fashion to other architectures, add the infrastructure and
Kconfig to enable DEBUG_SET_MODULE_RONX support. When enabled, module
ranges will be marked read-only/no-execute as appropriate.

Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
[will: fixed off-by-one in module end check]
Signed-off-by: Will Deacon <will.deacon@arm.com>
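(Editor's note, not part of the patch.) For orientation: the patch only adds the arm64 helpers and the Kconfig entry; with the option enabled it is the generic module loader that ends up calling them over a freshly loaded module's mappings. A minimal sketch of that caller side, assuming a hypothetical protect_module_range() and page-aligned section start addresses (error handling omitted):

#include <linux/mm.h>
#include <asm/cacheflush.h>	/* declares set_memory_ro()/set_memory_nx() after this patch */

/* Illustrative only: mark a module's text read-only and its data no-execute. */
static void protect_module_range(unsigned long text, unsigned long text_size,
				 unsigned long data, unsigned long data_size)
{
	/* The helpers take a page count, so round byte sizes up to whole pages. */
	set_memory_ro(text, PAGE_ALIGN(text_size) >> PAGE_SHIFT);
	set_memory_nx(data, PAGE_ALIGN(data_size) >> PAGE_SHIFT);
}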
-rw-r--r--   arch/arm64/Kconfig.debug              11
-rw-r--r--   arch/arm64/include/asm/cacheflush.h    4
-rw-r--r--   arch/arm64/mm/Makefile                 2
-rw-r--r--   arch/arm64/mm/pageattr.c              96
4 files changed, 112 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 4ee8e90b7a45..0a12933e50ed 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -43,4 +43,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
 	  of TEXT_OFFSET and platforms must not require a specific
 	  value.
 
+config DEBUG_SET_MODULE_RONX
+	bool "Set loadable kernel module data as NX and text as RO"
+	depends on MODULES
+	help
+	  This option helps catch unintended modifications to loadable
+	  kernel module's text and read-only data. It also prevents execution
+	  of module data. Such protection may interfere with run-time code
+	  patching and dynamic kernel tracing - and they might also protect
+	  against certain classes of kernel exploits.
+	  If in doubt, say "N".
+
 endmenu
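(Editor's note, not part of the patch.) The help text's caveat about run-time code patching can be made concrete with a purely illustrative sketch: once module text is read-only, a patching path has to flip the page writable around the write and then restore the protection. The helper patch_module_word() below is hypothetical and omits synchronisation against concurrent execution of the patched code:

#include <linux/types.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

static int patch_module_word(u32 *insn, u32 new_insn)
{
	unsigned long page = (unsigned long)insn & PAGE_MASK;
	int ret;

	ret = set_memory_rw(page, 1);		/* make the page writable again */
	if (ret)
		return ret;

	*insn = new_insn;
	flush_icache_range((unsigned long)insn, (unsigned long)(insn + 1));

	return set_memory_ro(page, 1);		/* re-apply the protection */
}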
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index f2defe1c380c..689b6379188c 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -148,4 +148,8 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
 }
 
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
 #endif
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 3ecb56c624d3..c56179ed2c09 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -1,5 +1,5 @@
 obj-y				:= dma-mapping.o extable.o fault.o init.o \
 				   cache.o copypage.o flush.o \
 				   ioremap.o mmap.o pgd.o mmu.o \
-				   context.o proc.o
+				   context.o proc.o pageattr.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
new file mode 100644
index 000000000000..75e744e4cec5
--- /dev/null
+++ b/arch/arm64/mm/pageattr.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+struct page_change_data {
+	pgprot_t set_mask;
+	pgprot_t clear_mask;
+};
+
+static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
+			void *data)
+{
+	struct page_change_data *cdata = data;
+	pte_t pte = *ptep;
+
+	pte = clear_pte_bit(pte, cdata->clear_mask);
+	pte = set_pte_bit(pte, cdata->set_mask);
+
+	set_pte(ptep, pte);
+	return 0;
+}
+
+static int change_memory_common(unsigned long addr, int numpages,
+				pgprot_t set_mask, pgprot_t clear_mask)
+{
+	unsigned long start = addr;
+	unsigned long size = PAGE_SIZE*numpages;
+	unsigned long end = start + size;
+	int ret;
+	struct page_change_data data;
+
+	if (!IS_ALIGNED(addr, PAGE_SIZE)) {
+		addr &= PAGE_MASK;
+		WARN_ON_ONCE(1);
+	}
+
+	if (!is_module_address(start) || !is_module_address(end - 1))
+		return -EINVAL;
+
+	data.set_mask = set_mask;
+	data.clear_mask = clear_mask;
+
+	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+					&data);
+
+	flush_tlb_kernel_range(start, end);
+	return ret;
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages,
+					__pgprot(PTE_RDONLY),
+					__pgprot(PTE_WRITE));
+}
+EXPORT_SYMBOL_GPL(set_memory_ro);
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages,
+					__pgprot(PTE_WRITE),
+					__pgprot(PTE_RDONLY));
+}
+EXPORT_SYMBOL_GPL(set_memory_rw);
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages,
+					__pgprot(PTE_PXN),
+					__pgprot(0));
+}
+EXPORT_SYMBOL_GPL(set_memory_nx);
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages,
+					__pgprot(0),
+					__pgprot(PTE_PXN));
+}
+EXPORT_SYMBOL_GPL(set_memory_x);
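(Editor's note, not part of the patch.) A short usage note on the implementation above, as a hedged example: addresses are expected to be page-aligned (unaligned ones are masked down after a WARN_ON_ONCE), numpages counts whole pages, and any address outside a loaded module is rejected with -EINVAL before any PTE is touched. The function below is purely illustrative; mod_addr stands in for a page-aligned address inside a loaded module's range:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/memory.h>
#include <asm/cacheflush.h>

static int pageattr_usage_demo(unsigned long mod_addr, size_t len)
{
	int numpages = PAGE_ALIGN(len) >> PAGE_SHIFT;
	int ret;

	/* A linear-map address is not a module address, so this must fail. */
	if (set_memory_ro(PAGE_OFFSET, 1) != -EINVAL)
		pr_warn("expected -EINVAL for a non-module address\n");

	ret = set_memory_ro(mod_addr, numpages);	/* protect the range */
	if (ret)
		return ret;

	return set_memory_rw(mod_addr, numpages);	/* and undo it again */
}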