ARM: use the generic gate_vma mechanism for the vectors page user mapping

Expose the user-readable vectors page at 0xffff0000 through a single
static gate_vma (get_gate_vma/in_gate_area) instead of installing a
per-process special mapping, which also lets us drop the arch_exit_mmap()
workaround that manually unlinked the fake vma on process exit.
 arch/arm/include/asm/elf.h         |  4 ----
 arch/arm/include/asm/mmu_context.h | 29 +--------------------------
 arch/arm/include/asm/page.h        |  2 ++
 arch/arm/kernel/process.c          | 38 ++++++++++++++++++++++----------
 4 files changed, 31 insertions(+), 42 deletions(-)
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 0e9ce8d9686e..38050b1c4800 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -130,8 +130,4 @@ struct mm_struct;
 extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 #define arch_randomize_brk arch_randomize_brk
 
-extern int vectors_user_mapping(void);
-#define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
-#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
-
 #endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 71605d9f8e42..a0b3cac0547c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -18,6 +18,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
 #include <asm/proc-fns.h>
+#include <asm-generic/mm_hooks.h>
 
 void __check_kvm_seq(struct mm_struct *mm);
 
@@ -133,32 +134,4 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #define deactivate_mm(tsk,mm)	do { } while (0)
 #define activate_mm(prev,next)	switch_mm(prev, next, NULL)
 
-/*
- * We are inserting a "fake" vma for the user-accessible vector page so
- * gdb and friends can get to it through ptrace and /proc/<pid>/mem.
- * But we also want to remove it before the generic code gets to see it
- * during process exit or the unmapping of it would cause total havoc.
- * (the macro is used as remove_vma() is static to mm/mmap.c)
- */
-#define arch_exit_mmap(mm) \
-do { \
-	struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
-	if (high_vma) { \
-		BUG_ON(high_vma->vm_next);  /* it should be last */ \
-		if (high_vma->vm_prev) \
-			high_vma->vm_prev->vm_next = NULL; \
-		else \
-			mm->mmap = NULL; \
-		rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
-		mm->mmap_cache = NULL; \
-		mm->map_count--; \
-		remove_vma(high_vma); \
-	} \
-} while (0)
-
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-				 struct mm_struct *mm)
-{
-}
-
 #endif
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 97b440c25c58..5838361c48b3 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -151,6 +151,8 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
 
+#define __HAVE_ARCH_GATE_AREA 1
+
 #ifdef CONFIG_ARM_LPAE
 #include <asm/pgtable-3level-types.h>
 #else
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 971d65c253a9..e11b523db332 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -526,22 +526,40 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 #ifdef CONFIG_MMU
 /*
  * The vectors page is always readable from user space for the
- * atomic helpers and the signal restart code. Let's declare a mapping
- * for it so it is visible through ptrace and /proc/<pid>/mem.
+ * atomic helpers and the signal restart code. Insert it into the
+ * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
  */
+static struct vm_area_struct gate_vma;
 
-int vectors_user_mapping(void)
+static int __init gate_vma_init(void)
 {
-	struct mm_struct *mm = current->mm;
-	return install_special_mapping(mm, 0xffff0000, PAGE_SIZE,
-				       VM_READ | VM_EXEC |
-				       VM_MAYREAD | VM_MAYEXEC |
-				       VM_ALWAYSDUMP | VM_RESERVED,
-				       NULL);
+	gate_vma.vm_start	= 0xffff0000;
+	gate_vma.vm_end		= 0xffff0000 + PAGE_SIZE;
+	gate_vma.vm_page_prot	= PAGE_READONLY_EXEC;
+	gate_vma.vm_flags	= VM_READ | VM_EXEC |
+				  VM_MAYREAD | VM_MAYEXEC |
+				  VM_ALWAYSDUMP;
+	return 0;
+}
+arch_initcall(gate_vma_init);
+
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+{
+	return &gate_vma;
+}
+
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
+{
+	return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
+}
+
+int in_gate_area_no_mm(unsigned long addr)
+{
+	return in_gate_area(NULL, addr);
 }
 
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
-	return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL;
+	return (vma == &gate_vma) ? "[vectors]" : NULL;
 }
 #endif