aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorArd Biesheuvel <ard.biesheuvel@linaro.org>2016-01-27 04:50:19 -0500
committerWill Deacon <will.deacon@arm.com>2016-02-02 10:42:15 -0500
commit95f5c80050ad723163aa80dc8bffd48ef4afc6d5 (patch)
tree8ea41807b5039f785d45e01ae11b5ec128a537c1
parent36f90b0a2ddd60823fe193a85e60ff1906c2a9b3 (diff)
arm64: allow vmalloc regions to be set with set_memory_*
The range of set_memory_* is currently restricted to the module address
range because of difficulties in breaking down larger block sizes.
vmalloc maps PAGE_SIZE pages so it is safe to use as well. Update the
function ranges and add a comment explaining why the range is restricted
the way it is.

Suggested-by: Laura Abbott <labbott@fedoraproject.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--	arch/arm64/mm/pageattr.c	23
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index cf6240741134..0795c3a36d8f 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/vmalloc.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -44,6 +45,7 @@ static int change_memory_common(unsigned long addr, int numpages,
 	unsigned long end = start + size;
 	int ret;
 	struct page_change_data data;
+	struct vm_struct *area;
 
 	if (!PAGE_ALIGNED(addr)) {
 		start &= PAGE_MASK;
@@ -51,10 +53,23 @@ static int change_memory_common(unsigned long addr, int numpages,
 		WARN_ON_ONCE(1);
 	}
 
-	if (start < MODULES_VADDR || start >= MODULES_END)
-		return -EINVAL;
-
-	if (end < MODULES_VADDR || end >= MODULES_END)
+	/*
+	 * Kernel VA mappings are always live, and splitting live section
+	 * mappings into page mappings may cause TLB conflicts. This means
+	 * we have to ensure that changing the permission bits of the range
+	 * we are operating on does not result in such splitting.
+	 *
+	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
+	 * Those are guaranteed to consist entirely of page mappings, and
+	 * splitting is never needed.
+	 *
+	 * So check whether the [addr, addr + size) interval is entirely
+	 * covered by precisely one VM area that has the VM_ALLOC flag set.
+	 */
+	area = find_vm_area((void *)addr);
+	if (!area ||
+	    end > (unsigned long)area->addr + area->size ||
+	    !(area->flags & VM_ALLOC))
 		return -EINVAL;
 
 	if (!numpages)