author     Jeff Dike <jdike@addtoit.com>	2008-02-05 01:31:07 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-05 12:44:29 -0500
commit     0b4e273fb83bce5dd8e166a4defb16ebdd215abf
tree       18f30fe9092dacf9a7e3474a9b0d692d91962242 /arch/um
parent     909e90d3c410b684e564729145f7c20dad887757

uml: customize tlb.h
Customize the hooks in tlb.h to optimize TLB flushing some more.
Add start and end fields to tlb_gather_mmu, which are used to limit
the address space range scanned when a region is unmapped.
The interfaces which just free page tables, without actually changing
mappings, don't need to cause a TLB flush.
Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/um')

 arch/um/kernel/tlb.c | 25 ++++++++++++++++++-------
 1 file changed, 18 insertions(+), 7 deletions(-)
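
The diffstat is limited to arch/um, so the tlb.h side that the commit title refers to is not shown here. As a minimal sketch of the mechanism the message describes (assuming the mmu_gather conventions of kernels of this era; the bodies below are illustrative, not the actual hunk), the gather structure tracks the lowest and highest addresses actually unmapped, so the final flush can be confined to that window:

/*
 * Illustrative sketch only, not the actual tlb.h change.
 * struct mmu_gather carries a [start, end) window that every
 * unmapped pte widens; a full-mm teardown preloads the whole
 * task range so nothing is missed.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		need_flush;	/* unmapped any ptes? */
	unsigned long		start;		/* lowest address unmapped */
	unsigned long		end;		/* highest address unmapped */
	unsigned int		fullmm;		/* whole mm is being torn down */
};

static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;

	/* Start with an empty window; it grows as entries are removed. */
	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}

/* Each removed pte marks the mm dirty and widens the flush window. */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		(tlb)->need_flush = 1;				\
		if ((address) < (tlb)->start)			\
			(tlb)->start = (address);		\
		if ((address) + PAGE_SIZE > (tlb)->end)		\
			(tlb)->end = (address) + PAGE_SIZE;	\
	} while (0)

The third point in the commit message falls out of the same scheme: hooks such as pte_free_tlb() only free page-table pages and change no mappings, so they no longer need to force a flush at all.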
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 8127ca8d5957..0b6a77def311 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -193,18 +193,18 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 		if (!pte_young(*pte)) {
 			r = 0;
 			w = 0;
-		} else if (!pte_dirty(*pte)) {
+		} else if (!pte_dirty(*pte))
 			w = 0;
-		}
+
 		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
 			(x ? UM_PROT_EXEC : 0));
 		if (hvc->force || pte_newpage(*pte)) {
 			if (pte_present(*pte))
 				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
 					       PAGE_SIZE, prot, hvc);
-			else ret = add_munmap(addr, PAGE_SIZE, hvc);
-		}
-		else if (pte_newprot(*pte))
+			else
+				ret = add_munmap(addr, PAGE_SIZE, hvc);
+		} else if (pte_newprot(*pte))
 			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
 		*pte = pte_mkuptodate(*pte);
 	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
@@ -500,7 +500,8 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	else fix_range(vma->vm_mm, start, end, 0);
 }
 
-void flush_tlb_mm(struct mm_struct *mm)
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+			unsigned long end)
 {
 	/*
 	 * Don't bother flushing if this address space is about to be
@@ -509,7 +510,17 @@ void flush_tlb_mm(struct mm_struct *mm)
 	if (atomic_read(&mm->mm_users) == 0)
 		return;
 
-	fix_range(mm, 0, TASK_SIZE, 0);
+	fix_range(mm, start, end, 0);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	struct vm_area_struct *vma = mm->mmap;
+
+	while (vma != NULL) {
+		fix_range(mm, vma->vm_start, vma->vm_end, 0);
+		vma = vma->vm_next;
+	}
 }
 
 void force_flush_all(void)
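
The rewritten flush_tlb_mm() above walks the mm's VMA list and flushes each mapped region individually, instead of asking fix_range() to scan the entire 0..TASK_SIZE span, unmapped holes included. The new flush_tlb_mm_range() is the entry point the tlb.h hooks can call with the window gathered during unmap. A minimal sketch of that consumer (assuming a tlb_flush_mmu()-style hook; the name and body are illustrative, not quoted from the patch):

/* Illustrative consumer of the gathered window, not the actual hunk. */
static inline void
tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;

	/* Flush only the range the unmap actually touched. */
	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
	init_tlb_gather(tlb);
}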