 arch/um/include/as-layout.h      |  19 ++++---------------
 arch/um/include/common-offsets.h |   3 ---
 arch/um/kernel/exec.c            |   5 +++--
 arch/um/kernel/skas/mmu.c        | 104 ++++++++++++++++++++++++++-----------
 arch/um/kernel/tlb.c             |  11 ++++-------
 include/asm-um/mmu_context.h     |   7 +++++--
 6 files changed, 75 insertions(+), 74 deletions(-)
diff --git a/arch/um/include/as-layout.h b/arch/um/include/as-layout.h
index a2008f550fee..606bb5c7fdf6 100644
--- a/arch/um/include/as-layout.h
+++ b/arch/um/include/as-layout.h
@@ -29,21 +29,10 @@
 #define _AC(X, Y) __AC(X, Y)
 #endif
 
-/*
- * The "- 1"'s are to avoid gcc complaining about integer overflows
- * and unrepresentable decimal constants. With 3-level page tables,
- * TASK_SIZE is 0x80000000, which gets turned into its signed decimal
- * equivalent in asm-offsets.s. gcc then complains about that being
- * unsigned only in C90. To avoid that, UM_TASK_SIZE is defined as
- * TASK_SIZE - 1. To compensate, we need to add the 1 back here.
- * However, adding it back to UM_TASK_SIZE produces more gcc
- * complaints. So, I adjust the thing being subtracted from
- * UM_TASK_SIZE instead. Bah.
- */
-#define STUB_CODE _AC((unsigned long), \
-		      UM_TASK_SIZE - (2 * UM_KERN_PAGE_SIZE - 1))
-#define STUB_DATA _AC((unsigned long), UM_TASK_SIZE - (UM_KERN_PAGE_SIZE - 1))
-#define STUB_START _AC(, STUB_CODE)
+#define STUB_START _AC(, 0x100000)
+#define STUB_CODE _AC((unsigned long), STUB_START)
+#define STUB_DATA _AC((unsigned long), STUB_CODE + UM_KERN_PAGE_SIZE)
+#define STUB_END _AC((unsigned long), STUB_DATA + UM_KERN_PAGE_SIZE)
 
 #ifndef __ASSEMBLY__
 
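With the 4 KiB pages UML normally uses, the new fixed layout works out as
follows (a worked example, assuming UM_KERN_PAGE_SIZE == 0x1000; the values
follow directly from the definitions above):

	/* Illustrative expansion, assuming UM_KERN_PAGE_SIZE == 0x1000 */
	STUB_START == 0x100000	/* bottom of the stub area */
	STUB_CODE  == 0x100000	/* one page of stub text */
	STUB_DATA  == 0x101000	/* one page of stub stack/data */
	STUB_END   == 0x102000	/* first address past the stub */

The stub now sits at a fixed low address instead of being computed downward
from UM_TASK_SIZE, which is what lets the "- 1" contortions above be deleted.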
diff --git a/arch/um/include/common-offsets.h b/arch/um/include/common-offsets.h
index 5b67d7ced2a7..b54bd35585c2 100644
--- a/arch/um/include/common-offsets.h
+++ b/arch/um/include/common-offsets.h
@@ -39,6 +39,3 @@ DEFINE(UM_HZ, HZ);
 DEFINE(UM_USEC_PER_SEC, USEC_PER_SEC);
 DEFINE(UM_NSEC_PER_SEC, NSEC_PER_SEC);
 DEFINE(UM_NSEC_PER_USEC, NSEC_PER_USEC);
-
-/* See as-layout.h for an explanation of the "- 1". Bah. */
-DEFINE(UM_TASK_SIZE, TASK_SIZE - 1);
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index bf66b5b7bc68..76a62c0cb2bc 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -19,12 +19,13 @@
 void flush_thread(void)
 {
 	void *data = NULL;
-	unsigned long end = proc_mm ? TASK_SIZE : STUB_START;
 	int ret;
 
 	arch_flush_thread(&current->thread.arch);
 
-	ret = unmap(&current->mm->context.id, 0, end, 1, &data);
+	ret = unmap(&current->mm->context.id, 0, STUB_START, 0, &data);
+	ret = ret || unmap(&current->mm->context.id, STUB_END,
+			   TASK_SIZE - STUB_END, 1, &data);
 	if (ret) {
 		printk(KERN_ERR "flush_thread - clearing address space failed, "
 		       "err = %d\n", ret);
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 6da9ab4f5a18..e8dc8540d444 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -34,25 +34,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 	if (!pte)
 		goto out_pte;
 
-	/*
-	 * There's an interaction between the skas0 stub pages, stack
-	 * randomization, and the BUG at the end of exit_mmap. exit_mmap
-	 * checks that the number of page tables freed is the same as had
-	 * been allocated. If the stack is on the last page table page,
-	 * then the stack pte page will be freed, and if not, it won't. To
-	 * avoid having to know where the stack is, or if the process mapped
-	 * something at the top of its address space for some other reason,
-	 * we set TASK_SIZE to end at the start of the last page table.
-	 * This keeps exit_mmap off the last page, but introduces a leak
-	 * of that page. So, we hang onto it here and free it in
-	 * destroy_context_skas.
-	 */
-
-	mm->context.last_page_table = pmd_page_vaddr(*pmd);
-#ifdef CONFIG_3_LEVEL_PGTABLES
-	mm->context.last_pmd = (unsigned long) __va(pud_val(*pud));
-#endif
-
 	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
 	*pte = pte_mkread(*pte);
 	return 0;
@@ -76,24 +57,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 		stack = get_zeroed_page(GFP_KERNEL);
 		if (stack == 0)
 			goto out;
-
-		/*
-		 * This zeros the entry that pgd_alloc didn't, needed since
-		 * we are about to reinitialize it, and want mm.nr_ptes to
-		 * be accurate.
-		 */
-		mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);
-
-		ret = init_stub_pte(mm, STUB_CODE,
-				    (unsigned long) &__syscall_stub_start);
-		if (ret)
-			goto out_free;
-
-		ret = init_stub_pte(mm, STUB_DATA, stack);
-		if (ret)
-			goto out_free;
-
-		mm->nr_ptes--;
 	}
 
 	to_mm->id.stack = stack;
@@ -137,6 +100,64 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 	return ret;
 }
 
+void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+{
+	struct page **pages;
+	int err, ret;
+
+	if (!skas_needs_stub)
+		return;
+
+	ret = init_stub_pte(mm, STUB_CODE,
+			    (unsigned long) &__syscall_stub_start);
+	if (ret)
+		goto out;
+
+	ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
+	if (ret)
+		goto out;
+
+	pages = kmalloc(2 * sizeof(struct page *), GFP_KERNEL);
+	if (pages == NULL) {
+		printk(KERN_ERR "arch_dup_mmap failed to allocate 2 page "
+		       "pointers\n");
+		goto out;
+	}
+
+	pages[0] = virt_to_page(&__syscall_stub_start);
+	pages[1] = virt_to_page(mm->context.id.stack);
+
+	/* dup_mmap already holds mmap_sem */
+	err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
+				      VM_READ | VM_MAYREAD | VM_EXEC |
+				      VM_MAYEXEC | VM_DONTCOPY, pages);
+	if (err) {
+		printk(KERN_ERR "install_special_mapping returned %d\n", err);
+		goto out_free;
+	}
+	return;
+
+out_free:
+	kfree(pages);
+out:
+	force_sigsegv(SIGSEGV, current);
+}
+
+void arch_exit_mmap(struct mm_struct *mm)
+{
+	pte_t *pte;
+
+	pte = virt_to_pte(mm, STUB_CODE);
+	if (pte != NULL)
+		pte_clear(mm, STUB_CODE, pte);
+
+	pte = virt_to_pte(mm, STUB_DATA);
+	if (pte == NULL)
+		return;
+
+	pte_clear(mm, STUB_DATA, pte);
+}
+
 void destroy_context(struct mm_struct *mm)
 {
 	struct mm_context *mmu = &mm->context;
@@ -146,15 +167,8 @@ void destroy_context(struct mm_struct *mm)
 	else
 		os_kill_ptraced_process(mmu->id.u.pid, 1);
 
-	if (!proc_mm || !ptrace_faultinfo) {
+	if (skas_needs_stub)
 		free_page(mmu->id.stack);
-		pte_lock_deinit(virt_to_page(mmu->last_page_table));
-		pte_free_kernel(mm, (pte_t *) mmu->last_page_table);
-		dec_zone_page_state(virt_to_page(mmu->last_page_table), NR_PAGETABLE);
-#ifdef CONFIG_3_LEVEL_PGTABLES
-		pmd_free(mm, (pmd_t *) mmu->last_pmd);
-#endif
-	}
 
 	free_ldt(mmu);
 }
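Two design points worth noting here (commentary, not part of the patch). The
stub VMA is created with VM_DONTCOPY, so dup_mmap() skips it when copying the
parent's VMAs; arch_dup_mmap() then installs a fresh mapping whose data page
is the child's own context.id.stack. And because the region is now covered by
a real VMA, it should appear in /proc/<pid>/maps, roughly like this
(illustrative line only, addresses assuming the 4 KiB pages worked out above):

	/*
	 * Hypothetical /proc/<pid>/maps entry for the stub area:
	 * VM_READ | VM_EXEC, private, two pages starting at STUB_START.
	 *
	 *   00100000-00102000 r-xp 00000000 00:00 0
	 */

This is what fixes the exit_mmap() page-table accounting that the deleted
comment in init_stub_pte() used to work around by leaking the last page table.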
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 429fed2f66b2..ef5a2a20d351 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -184,6 +184,9 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
+		if ((addr >= STUB_START) && (addr < STUB_END))
+			continue;
+
 		r = pte_read(*pte);
 		w = pte_write(*pte);
 		x = pte_exec(*pte);
@@ -486,9 +489,6 @@ void __flush_tlb_one(unsigned long addr)
 static void fix_range(struct mm_struct *mm, unsigned long start_addr,
 		      unsigned long end_addr, int force)
 {
-	if (!proc_mm && (end_addr > STUB_START))
-		end_addr = STUB_START;
-
 	fix_range_common(mm, start_addr, end_addr, force);
 }
 
@@ -502,8 +502,6 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	unsigned long end;
-
 	/*
 	 * Don't bother flushing if this address space is about to be
 	 * destroyed.
@@ -511,8 +509,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 	if (atomic_read(&mm->mm_users) == 0)
 		return;
 
-	end = proc_mm ? TASK_SIZE : STUB_START;
-	fix_range(mm, 0, end, 0);
+	fix_range(mm, 0, TASK_SIZE, 0);
 }
 
 void force_flush_all(void)
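With the stub at a fixed address inside the normal range rather than at the
top of it, the old trick of clamping end_addr to STUB_START no longer makes
sense, so the page-table walker now skips the stub pages individually. The
guard condition, written out as a standalone predicate (hypothetical helper
name, shown only to make the rule explicit):

	/* Hypothetical helper: addresses the TLB walker must leave alone. */
	static inline int in_stub_range(unsigned long addr)
	{
		return addr >= STUB_START && addr < STUB_END;
	}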
diff --git a/include/asm-um/mmu_context.h b/include/asm-um/mmu_context.h
index 5f3b863aef9a..6686fc524ca1 100644
--- a/include/asm-um/mmu_context.h
+++ b/include/asm-um/mmu_context.h
@@ -6,11 +6,12 @@
 #ifndef __UM_MMU_CONTEXT_H
 #define __UM_MMU_CONTEXT_H
 
-#include <asm-generic/mm_hooks.h>
-
 #include "linux/sched.h"
 #include "um_mmu.h"
 
+extern void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
+extern void arch_exit_mmap(struct mm_struct *mm);
+
 #define get_mmu_context(task) do ; while(0)
 #define activate_context(tsk) do ; while(0)
 
@@ -30,6 +31,8 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
 	 */
 	if (old != new && (current->flags & PF_BORROWED_MM))
 		__switch_mm(&new->context.id);
+
+	arch_dup_mmap(old, new);
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
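Dropping <asm-generic/mm_hooks.h> matters because that header defines these
hooks as empty no-ops; UML now has to declare the real ones itself. For
reference, the dropped header supplied essentially this (paraphrased from
asm-generic/mm_hooks.h of this era):

	/* What <asm-generic/mm_hooks.h> provided: do-nothing stub hooks. */
	static inline void arch_dup_mmap(struct mm_struct *oldmm,
					 struct mm_struct *mm)
	{
	}

	static inline void arch_exit_mmap(struct mm_struct *mm)
	{
	}

Generic code calls arch_dup_mmap() from dup_mmap() at fork time (hence the
"already holds mmap_sem" comment in skas/mmu.c) and arch_exit_mmap() from
exit_mmap(); the extra call in activate_mm() presumably covers exec, where
the fresh mm never passes through dup_mmap() and would otherwise lack the
stub VMA.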