Diffstat (limited to 'arch/um')

 -rw-r--r--  arch/um/kernel/tlb.c | 202
 1 file changed, 120 insertions(+), 82 deletions(-)
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index e40f3352bd29..153dec13cf97 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -6,6 +6,7 @@
 #include "linux/mm.h"
 #include "asm/page.h"
 #include "asm/pgalloc.h"
+#include "asm/pgtable.h"
 #include "asm/tlbflush.h"
 #include "choose-mode.h"
 #include "mode_kern.h"
@@ -123,106 +124,143 @@ static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
 
 #define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
 
+static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
+				   unsigned long end, struct host_vm_op *ops,
+				   int last_op, int *op_index, int force,
+				   union mm_context *mmu, void **flush,
+				   int (*do_ops)(union mm_context *,
+						 struct host_vm_op *, int, int,
+						 void **))
+{
+	pte_t *pte;
+	int r, w, x, ret = 0;
+
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		r = pte_read(*pte);
+		w = pte_write(*pte);
+		x = pte_exec(*pte);
+		if (!pte_young(*pte)) {
+			r = 0;
+			w = 0;
+		} else if (!pte_dirty(*pte)) {
+			w = 0;
+		}
+		if(force || pte_newpage(*pte)){
+			if(pte_present(*pte))
+				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
+					       PAGE_SIZE, r, w, x, ops,
+					       op_index, last_op, mmu, flush,
+					       do_ops);
+			else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
+					      last_op, mmu, flush, do_ops);
+		}
+		else if(pte_newprot(*pte))
+			ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
+					   op_index, last_op, mmu, flush,
+					   do_ops);
+		*pte = pte_mkuptodate(*pte);
+	} while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
+	return ret;
+}
+
+static inline int update_pmd_range(pud_t *pud, unsigned long addr,
+				   unsigned long end, struct host_vm_op *ops,
+				   int last_op, int *op_index, int force,
+				   union mm_context *mmu, void **flush,
+				   int (*do_ops)(union mm_context *,
+						 struct host_vm_op *, int, int,
+						 void **))
+{
+	pmd_t *pmd;
+	unsigned long next;
+	int ret = 0;
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if(!pmd_present(*pmd)){
+			if(force || pmd_newpage(*pmd)){
+				ret = add_munmap(addr, next - addr, ops,
+						 op_index, last_op, mmu,
+						 flush, do_ops);
+				pmd_mkuptodate(*pmd);
+			}
+		}
+		else ret = update_pte_range(pmd, addr, next, ops, last_op,
+					    op_index, force, mmu, flush,
+					    do_ops);
+	} while (pmd++, addr = next, ((addr != end) && !ret));
+	return ret;
+}
+
+static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
+				   unsigned long end, struct host_vm_op *ops,
+				   int last_op, int *op_index, int force,
+				   union mm_context *mmu, void **flush,
+				   int (*do_ops)(union mm_context *,
+						 struct host_vm_op *, int, int,
+						 void **))
+{
+	pud_t *pud;
+	unsigned long next;
+	int ret = 0;
+
+	pud = pud_offset(pgd, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if(!pud_present(*pud)){
+			if(force || pud_newpage(*pud)){
+				ret = add_munmap(addr, next - addr, ops,
+						 op_index, last_op, mmu,
+						 flush, do_ops);
+				pud_mkuptodate(*pud);
+			}
+		}
+		else ret = update_pmd_range(pud, addr, next, ops, last_op,
+					    op_index, force, mmu, flush,
+					    do_ops);
+	} while (pud++, addr = next, ((addr != end) && !ret));
+	return ret;
+}
+
 void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
 		      unsigned long end_addr, int force,
 		      int (*do_ops)(union mm_context *, struct host_vm_op *,
 				    int, int, void **))
 {
-	pgd_t *npgd;
-	pud_t *npud;
-	pmd_t *npmd;
-	pte_t *npte;
+	pgd_t *pgd;
 	union mm_context *mmu = &mm->context;
-	unsigned long addr, end;
-	int r, w, x;
 	struct host_vm_op ops[1];
+	unsigned long addr = start_addr, next;
+	int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
 	void *flush = NULL;
-	int op_index = -1, last_op = ARRAY_SIZE(ops) - 1;
-	int ret = 0;
-
-	if(mm == NULL)
-		return;
+	unsigned long long start_time, end_time;
 
+	start_time = os_nsecs();
 	ops[0].type = NONE;
-	for(addr = start_addr; addr < end_addr && !ret;){
-		npgd = pgd_offset(mm, addr);
-		if(!pgd_present(*npgd)){
-			end = ADD_ROUND(addr, PGDIR_SIZE);
-			if(end > end_addr)
-				end = end_addr;
-			if(force || pgd_newpage(*npgd)){
-				ret = add_munmap(addr, end - addr, ops,
-						 &op_index, last_op, mmu,
-						 &flush, do_ops);
-				pgd_mkuptodate(*npgd);
-			}
-			addr = end;
-			continue;
-		}
-
-		npud = pud_offset(npgd, addr);
-		if(!pud_present(*npud)){
-			end = ADD_ROUND(addr, PUD_SIZE);
-			if(end > end_addr)
-				end = end_addr;
-			if(force || pud_newpage(*npud)){
-				ret = add_munmap(addr, end - addr, ops,
-						 &op_index, last_op, mmu,
-						 &flush, do_ops);
-				pud_mkuptodate(*npud);
-			}
-			addr = end;
-			continue;
-		}
-
-		npmd = pmd_offset(npud, addr);
-		if(!pmd_present(*npmd)){
-			end = ADD_ROUND(addr, PMD_SIZE);
-			if(end > end_addr)
-				end = end_addr;
-			if(force || pmd_newpage(*npmd)){
-				ret = add_munmap(addr, end - addr, ops,
+	pgd = pgd_offset(mm, addr);
+	do {
+		next = pgd_addr_end(addr, end_addr);
+		if(!pgd_present(*pgd)){
+			if (force || pgd_newpage(*pgd)){
+				ret = add_munmap(addr, next - addr, ops,
 						 &op_index, last_op, mmu,
 						 &flush, do_ops);
-				pmd_mkuptodate(*npmd);
+				pgd_mkuptodate(*pgd);
 			}
-			addr = end;
-			continue;
 		}
+		else ret = update_pud_range(pgd, addr, next, ops, last_op,
+					    &op_index, force, mmu, &flush,
+					    do_ops);
+	} while (pgd++, addr = next, ((addr != end_addr) && !ret));
+	end_time = os_nsecs();
+	log_info("total flush time - %Ld nsecs\n", end_time - start_time);
 
-		npte = pte_offset_kernel(npmd, addr);
-		r = pte_read(*npte);
-		w = pte_write(*npte);
-		x = pte_exec(*npte);
-		if (!pte_young(*npte)) {
-			r = 0;
-			w = 0;
-		} else if (!pte_dirty(*npte)) {
-			w = 0;
-		}
-		if(force || pte_newpage(*npte)){
-			if(pte_present(*npte))
-				ret = add_mmap(addr,
-					       pte_val(*npte) & PAGE_MASK,
-					       PAGE_SIZE, r, w, x, ops,
-					       &op_index, last_op, mmu,
-					       &flush, do_ops);
-			else ret = add_munmap(addr, PAGE_SIZE, ops,
-					      &op_index, last_op, mmu,
-					      &flush, do_ops);
-		}
-		else if(pte_newprot(*npte))
-			ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
-					   &op_index, last_op, mmu,
-					   &flush, do_ops);
-
-		*npte = pte_mkuptodate(*npte);
-		addr += PAGE_SIZE;
-	}
 	if(!ret)
 		ret = (*do_ops)(mmu, ops, op_index, 1, &flush);
 
-/* This is not an else because ret is modified above */
+	/* This is not an else because ret is modified above */
 	if(ret) {
 		printk("fix_range_common: failed, killing current process\n");
 		force_sig(SIGKILL, current);