Diffstat (limited to 'arch/um/kernel/skas/mmu.c')
 arch/um/kernel/skas/mmu.c | 127 ++++++++++++++++++++++++-----------------
 1 file changed, 79 insertions(+), 48 deletions(-)
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index f859ec306cd5..78b3e9f69d57 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -34,33 +34,14 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 	if (!pte)
 		goto out_pte;
 
-	/*
-	 * There's an interaction between the skas0 stub pages, stack
-	 * randomization, and the BUG at the end of exit_mmap. exit_mmap
-	 * checks that the number of page tables freed is the same as had
-	 * been allocated. If the stack is on the last page table page,
-	 * then the stack pte page will be freed, and if not, it won't. To
-	 * avoid having to know where the stack is, or if the process mapped
-	 * something at the top of its address space for some other reason,
-	 * we set TASK_SIZE to end at the start of the last page table.
-	 * This keeps exit_mmap off the last page, but introduces a leak
-	 * of that page. So, we hang onto it here and free it in
-	 * destroy_context_skas.
-	 */
-
-	mm->context.last_page_table = pmd_page_vaddr(*pmd);
-#ifdef CONFIG_3_LEVEL_PGTABLES
-	mm->context.last_pmd = (unsigned long) __va(pud_val(*pud));
-#endif
-
 	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
 	*pte = pte_mkread(*pte);
 	return 0;
 
  out_pmd:
-	pud_free(pud);
+	pud_free(mm, pud);
  out_pte:
-	pmd_free(pmd);
+	pmd_free(mm, pmd);
  out:
 	return -ENOMEM;
 }
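
In the error path above, pud_free() and pmd_free() now take the owning mm_struct, following the generic page-table API of this kernel generation in which each level's alloc/free pair carries the mm it accounts against. Below is a minimal sketch of that pairing, mirroring init_stub_pte()'s own walk; walk_alloc_sketch is an illustrative name, not code from this patch:

/*
 * Sketch of the mm-aware alloc/free pairing the hunk relies on.
 * Each allocator is handed the mm it accounts against, and the
 * matching free must receive the same mm.  Illustrative only.
 */
static int walk_alloc_sketch(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_alloc(mm, pgd, addr);
	pmd_t *pmd;

	if (!pud)
		return -ENOMEM;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd) {
		pud_free(mm, pud);	/* same mm as the allocation */
		return -ENOMEM;
	}

	/* ... use the tables, then unwind in reverse order ... */
	pmd_free(mm, pmd);
	pud_free(mm, pud);
	return 0;
}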
@@ -76,24 +57,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 		stack = get_zeroed_page(GFP_KERNEL);
 		if (stack == 0)
 			goto out;
-
-		/*
-		 * This zeros the entry that pgd_alloc didn't, needed since
-		 * we are about to reinitialize it, and want mm.nr_ptes to
-		 * be accurate.
-		 */
-		mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);
-
-		ret = init_stub_pte(mm, STUB_CODE,
-				    (unsigned long) &__syscall_stub_start);
-		if (ret)
-			goto out_free;
-
-		ret = init_stub_pte(mm, STUB_DATA, stack);
-		if (ret)
-			goto out_free;
-
-		mm->nr_ptes--;
 	}
 
 	to_mm->id.stack = stack;
@@ -114,6 +77,11 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 			to_mm->id.u.pid = copy_context_skas0(stack,
 							     from_mm->id.u.pid);
 		else to_mm->id.u.pid = start_userspace(stack);
+
+		if (to_mm->id.u.pid < 0) {
+			ret = to_mm->id.u.pid;
+			goto out_free;
+		}
 	}
 
 	ret = init_new_ldt(to_mm, from_mm);
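
The added check exists because copy_context_skas0() and start_userspace() return either a host pid or a negative errno in the same integer, so the caller must branch on the sign before treating the value as a pid. A hedged sketch of that convention; attach_stub_sketch is an illustrative wrapper, not code from the patch:

/*
 * Pid-or-errno convention assumed by the new check: the helper
 * returns a host pid on success or -errno on failure, so only a
 * non-negative result may be stored as a pid.  Illustrative only.
 */
static int attach_stub_sketch(struct mm_context *to_mm, unsigned long stack)
{
	int pid = start_userspace(stack);	/* host pid or -errno */

	if (pid < 0)
		return pid;	/* propagate the errno; never store it */

	to_mm->id.u.pid = pid;
	return 0;
}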
@@ -132,24 +100,87 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 	return ret;
 }
 
+void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+{
+	struct page **pages;
+	int err, ret;
+
+	if (!skas_needs_stub)
+		return;
+
+	ret = init_stub_pte(mm, STUB_CODE,
+			    (unsigned long) &__syscall_stub_start);
+	if (ret)
+		goto out;
+
+	ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
+	if (ret)
+		goto out;
+
+	pages = kmalloc(2 * sizeof(struct page *), GFP_KERNEL);
+	if (pages == NULL) {
+		printk(KERN_ERR "arch_dup_mmap failed to allocate 2 page "
+		       "pointers\n");
+		goto out;
+	}
+
+	pages[0] = virt_to_page(&__syscall_stub_start);
+	pages[1] = virt_to_page(mm->context.id.stack);
+
+	/* dup_mmap already holds mmap_sem */
+	err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
+				      VM_READ | VM_MAYREAD | VM_EXEC |
+				      VM_MAYEXEC | VM_DONTCOPY, pages);
+	if (err) {
+		printk(KERN_ERR "install_special_mapping returned %d\n", err);
+		goto out_free;
+	}
+	return;
+
+out_free:
+	kfree(pages);
+out:
+	force_sigsegv(SIGSEGV, current);
+}
+
+void arch_exit_mmap(struct mm_struct *mm)
+{
+	pte_t *pte;
+
+	pte = virt_to_pte(mm, STUB_CODE);
+	if (pte != NULL)
+		pte_clear(mm, STUB_CODE, pte);
+
+	pte = virt_to_pte(mm, STUB_DATA);
+	if (pte == NULL)
+		return;
+
+	pte_clear(mm, STUB_DATA, pte);
+}
+
 void destroy_context(struct mm_struct *mm)
 {
 	struct mm_context *mmu = &mm->context;
 
 	if (proc_mm)
 		os_close_file(mmu->id.u.mm_fd);
-	else
+	else {
+		/*
+		 * If init_new_context wasn't called, this will be
+		 * zero, resulting in a kill(0), which will result in the
+		 * whole UML suddenly dying. Also, cover negative and
+		 * 1 cases, since they shouldn't happen either.
+		 */
+		if (mmu->id.u.pid < 2) {
+			printk(KERN_ERR "corrupt mm_context - pid = %d\n",
+			       mmu->id.u.pid);
+			return;
+		}
 		os_kill_ptraced_process(mmu->id.u.pid, 1);
+	}
 
-	if (!proc_mm || !ptrace_faultinfo) {
+	if (skas_needs_stub)
 		free_page(mmu->id.stack);
-		pte_lock_deinit(virt_to_page(mmu->last_page_table));
-		pte_free_kernel((pte_t *) mmu->last_page_table);
-		dec_zone_page_state(virt_to_page(mmu->last_page_table), NR_PAGETABLE);
-#ifdef CONFIG_3_LEVEL_PGTABLES
-		pmd_free((pmd_t *) mmu->last_pmd);
-#endif
-	}
 
 	free_ldt(mmu);
 }
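
arch_exit_mmap() above depends on virt_to_pte() returning NULL whenever any page-table level for the stub address was never populated, which is what makes its early returns safe after the tables have been torn down. A hedged sketch of what such a lookup does under the era's three-level layout; virt_to_pte_sketch stands in for UML's real helper and is not the file's actual implementation:

/*
 * Walk the page tables for one address and return the pte pointer,
 * or NULL if any intermediate level is absent.  Illustrative only.
 */
static pte_t *virt_to_pte_sketch(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pte_offset_kernel(pmd, addr);
}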