Diffstat (limited to 'arch/um/kernel/tlb.c')
-rw-r--r--   arch/um/kernel/tlb.c | 51 ++++++++++++++++++++++++++++++++++---------------------
1 file changed, 30 insertions(+), 21 deletions(-)
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index f4a0e407eee4..d175d0566af0 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -3,9 +3,10 @@
  * Licensed under the GPL
  */

-#include "linux/mm.h"
-#include "asm/pgtable.h"
-#include "asm/tlbflush.h"
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 #include "as-layout.h"
 #include "mem_user.h"
 #include "os.h"
@@ -56,7 +57,7 @@ static int do_ops(struct host_vm_change *hvc, int end,

        for (i = 0; i < end && !ret; i++) {
                op = &hvc->ops[i];
-               switch(op->type) {
+               switch (op->type) {
                case MMAP:
                        ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
                                  op->u.mmap.prot, op->u.mmap.fd,
@@ -183,27 +184,30 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,

        pte = pte_offset_kernel(pmd, addr);
        do {
+               if ((addr >= STUB_START) && (addr < STUB_END))
+                       continue;
+
                r = pte_read(*pte);
                w = pte_write(*pte);
                x = pte_exec(*pte);
                if (!pte_young(*pte)) {
                        r = 0;
                        w = 0;
-               } else if (!pte_dirty(*pte)) {
+               } else if (!pte_dirty(*pte))
                        w = 0;
-               }
+
                prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                        (x ? UM_PROT_EXEC : 0));
                if (hvc->force || pte_newpage(*pte)) {
                        if (pte_present(*pte))
                                ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
                                               PAGE_SIZE, prot, hvc);
-                       else ret = add_munmap(addr, PAGE_SIZE, hvc);
-               }
-               else if (pte_newprot(*pte))
+                       else
+                               ret = add_munmap(addr, PAGE_SIZE, hvc);
+               } else if (pte_newprot(*pte))
                        ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
                *pte = pte_mkuptodate(*pte);
-       } while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
+       } while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
        return ret;
 }

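The stub check added above leans on a subtlety of do/while: `continue` jumps to the controlling expression, so the comma-chained `pte++, addr += PAGE_SIZE` still executes and the walk cannot loop forever on a stub page. A minimal standalone sketch of that behaviour (hypothetical addresses and a page size of 1, not kernel code):

    #include <stdio.h>

    #define STUB_START 4UL  /* stand-ins for the UML stub bounds */
    #define STUB_END   6UL

    int main(void)
    {
            unsigned long addr = 0, end = 8;

            do {
                    if ((addr >= STUB_START) && (addr < STUB_END))
                            continue;       /* jumps to the while() expression */
                    printf("visit %lu\n", addr);
            } while (addr += 1, addr < end);        /* increment still runs after continue */

            return 0;
    }

Addresses 4 and 5 are skipped yet the loop still terminates; the switch from `addr != end` to `addr < end` additionally guarantees termination even if addr ever stepped past end.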
@@ -225,7 +229,7 @@ static inline int update_pmd_range(pud_t *pud, unsigned long addr,
                        }
                }
                else ret = update_pte_range(pmd, addr, next, hvc);
-       } while (pmd++, addr = next, ((addr != end) && !ret));
+       } while (pmd++, addr = next, ((addr < end) && !ret));
        return ret;
 }

@@ -247,7 +251,7 @@ static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
                        }
                }
                else ret = update_pmd_range(pud, addr, next, hvc);
-       } while (pud++, addr = next, ((addr != end) && !ret));
+       } while (pud++, addr = next, ((addr < end) && !ret));
        return ret;
 }

@@ -270,7 +274,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                        }
                }
                else ret = update_pud_range(pgd, addr, next, &hvc);
-       } while (pgd++, addr = next, ((addr != end_addr) && !ret));
+       } while (pgd++, addr = next, ((addr < end_addr) && !ret));

        if (!ret)
                ret = do_ops(&hvc, hvc.index, 1);
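The same `!=` to `<` change is applied at every level of the walk (pte, pmd, pud, pgd). Each loop advances with `addr = next`, where next comes from the kernel's pXd_addr_end() clamp, which never returns a value past end, so `<` and `!=` behave identically in practice; `<` simply makes termination robust against any future overshoot. A sketch of the clamp's shape, simplified from the generic kernel macro (the entry span is illustrative):

    /* Round addr up to the next table boundary, but never past end.
     * The "- 1" comparison keeps the check correct if the rounded
     * boundary wraps to 0 at the top of the address space.
     */
    #define PXD_SIZE (1UL << 22)        /* illustrative span of one entry */
    #define PXD_MASK (~(PXD_SIZE - 1))

    static inline unsigned long pxd_addr_end(unsigned long addr, unsigned long end)
    {
            unsigned long boundary = (addr + PXD_SIZE) & PXD_MASK;

            return (boundary - 1 < end - 1) ? boundary : end;
    }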
@@ -485,9 +489,6 @@ void __flush_tlb_one(unsigned long addr)
 static void fix_range(struct mm_struct *mm, unsigned long start_addr,
                       unsigned long end_addr, int force)
 {
-       if (!proc_mm && (end_addr > STUB_START))
-               end_addr = STUB_START;
-
        fix_range_common(mm, start_addr, end_addr, force);
 }

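With the proc_mm mode gone, fix_range() no longer clamps flushes to STUB_START; the stub pages are instead protected page by page by the skip added in update_pte_range(). For reference, what the removed guard amounted to, as a standalone sketch:

    /* Old behaviour: any flush reaching past the stub was silently
     * truncated, so [start_addr, STUB_START) was the largest range a
     * non-proc_mm flush could cover. The per-PTE skip makes this
     * clamp unnecessary.
     */
    static unsigned long clamp_to_stub(unsigned long end_addr)
    {
            return (end_addr > STUB_START) ? STUB_START : end_addr;
    }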
@@ -499,10 +500,9 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        else fix_range(vma->vm_mm, start, end, 0);
 }

-void flush_tlb_mm(struct mm_struct *mm)
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+                       unsigned long end)
 {
-       unsigned long end;
-
        /*
         * Don't bother flushing if this address space is about to be
         * destroyed.
@@ -510,8 +510,17 @@ void flush_tlb_mm(struct mm_struct *mm)
        if (atomic_read(&mm->mm_users) == 0)
                return;

-       end = proc_mm ? task_size : STUB_START;
-       fix_range(mm, 0, end, 0);
+       fix_range(mm, start, end, 0);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+       struct vm_area_struct *vma = mm->mmap;
+
+       while (vma != NULL) {
+               fix_range(mm, vma->vm_start, vma->vm_end, 0);
+               vma = vma->vm_next;
+       }
 }

 void force_flush_all(void)
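flush_tlb_mm() previously flushed one flat range starting at address 0; it now walks the mm's VMA list and flushes only the mapped spans, skipping the possibly huge holes between mappings. The iteration uses the singly linked mm->mmap/vm_next chain of this kernel generation; an equivalent for-loop form of the new body:

    /* Equivalent formulation of the new flush_tlb_mm() loop. */
    void flush_tlb_mm(struct mm_struct *mm)
    {
            struct vm_area_struct *vma;

            for (vma = mm->mmap; vma != NULL; vma = vma->vm_next)
                    fix_range(mm, vma->vm_start, vma->vm_end, 0);
    }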