Diffstat (limited to 'arch/um/kernel/skas/mmu.c')
-rw-r--r-- | arch/um/kernel/skas/mmu.c | 104
1 file changed, 59 insertions, 45 deletions
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 6da9ab4f5a18..e8dc8540d444 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -34,25 +34,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 	if (!pte)
 		goto out_pte;
 
-	/*
-	 * There's an interaction between the skas0 stub pages, stack
-	 * randomization, and the BUG at the end of exit_mmap. exit_mmap
-	 * checks that the number of page tables freed is the same as had
-	 * been allocated. If the stack is on the last page table page,
-	 * then the stack pte page will be freed, and if not, it won't. To
-	 * avoid having to know where the stack is, or if the process mapped
-	 * something at the top of its address space for some other reason,
-	 * we set TASK_SIZE to end at the start of the last page table.
-	 * This keeps exit_mmap off the last page, but introduces a leak
-	 * of that page. So, we hang onto it here and free it in
-	 * destroy_context_skas.
-	 */
-
-	mm->context.last_page_table = pmd_page_vaddr(*pmd);
-#ifdef CONFIG_3_LEVEL_PGTABLES
-	mm->context.last_pmd = (unsigned long) __va(pud_val(*pud));
-#endif
-
 	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
 	*pte = pte_mkread(*pte);
 	return 0;
@@ -76,24 +57,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 		stack = get_zeroed_page(GFP_KERNEL);
 		if (stack == 0)
 			goto out;
-
-		/*
-		 * This zeros the entry that pgd_alloc didn't, needed since
-		 * we are about to reinitialize it, and want mm.nr_ptes to
-		 * be accurate.
-		 */
-		mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);
-
-		ret = init_stub_pte(mm, STUB_CODE,
-				    (unsigned long) &__syscall_stub_start);
-		if (ret)
-			goto out_free;
-
-		ret = init_stub_pte(mm, STUB_DATA, stack);
-		if (ret)
-			goto out_free;
-
-		mm->nr_ptes--;
 	}
 
 	to_mm->id.stack = stack;
@@ -137,6 +100,64 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 	return ret;
 }
 
+void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+{
+	struct page **pages;
+	int err, ret;
+
+	if (!skas_needs_stub)
+		return;
+
+	ret = init_stub_pte(mm, STUB_CODE,
+			    (unsigned long) &__syscall_stub_start);
+	if (ret)
+		goto out;
+
+	ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
+	if (ret)
+		goto out;
+
+	pages = kmalloc(2 * sizeof(struct page *), GFP_KERNEL);
+	if (pages == NULL) {
+		printk(KERN_ERR "arch_dup_mmap failed to allocate 2 page "
+		       "pointers\n");
+		goto out;
+	}
+
+	pages[0] = virt_to_page(&__syscall_stub_start);
+	pages[1] = virt_to_page(mm->context.id.stack);
+
+	/* dup_mmap already holds mmap_sem */
+	err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
+				      VM_READ | VM_MAYREAD | VM_EXEC |
+				      VM_MAYEXEC | VM_DONTCOPY, pages);
+	if (err) {
+		printk(KERN_ERR "install_special_mapping returned %d\n", err);
+		goto out_free;
+	}
+	return;
+
+out_free:
+	kfree(pages);
+out:
+	force_sigsegv(SIGSEGV, current);
+}
+
+void arch_exit_mmap(struct mm_struct *mm)
+{
+	pte_t *pte;
+
+	pte = virt_to_pte(mm, STUB_CODE);
+	if (pte != NULL)
+		pte_clear(mm, STUB_CODE, pte);
+
+	pte = virt_to_pte(mm, STUB_DATA);
+	if (pte == NULL)
+		return;
+
+	pte_clear(mm, STUB_DATA, pte);
+}
+
 void destroy_context(struct mm_struct *mm)
 {
 	struct mm_context *mmu = &mm->context;
@@ -146,15 +167,8 @@ void destroy_context(struct mm_struct *mm)
 	else
 		os_kill_ptraced_process(mmu->id.u.pid, 1);
 
-	if (!proc_mm || !ptrace_faultinfo) {
+	if (skas_needs_stub)
 		free_page(mmu->id.stack);
-		pte_lock_deinit(virt_to_page(mmu->last_page_table));
-		pte_free_kernel(mm, (pte_t *) mmu->last_page_table);
-		dec_zone_page_state(virt_to_page(mmu->last_page_table), NR_PAGETABLE);
-#ifdef CONFIG_3_LEVEL_PGTABLES
-		pmd_free(mm, (pmd_t *) mmu->last_pmd);
-#endif
-	}
 
 	free_ldt(mmu);
 }