summaryrefslogtreecommitdiffstats
path: root/arch/um
diff options
context:
space:
mode:
author	Anton Ivanov <anton.ivanov@cambridgegreys.com>	2018-12-07 04:05:53 -0500
committer	Richard Weinberger <richard@nod.at>	2018-12-27 16:48:34 -0500
commit742f3c8193a3cb3e444887211214ef0721e3ef8d (patch)
treec74eb64a5d8b02cca48b7b5ab7e97b415fc957bf /arch/um
parent8892d8545f2d0342b9c550defbfb165db237044b (diff)
um: Optimize Flush TLB for force/fork case
When UML handles a fork the page tables need to be brought up to date. That was done using brute force - full tlb flush. This is actually unnecessary, because the mapped-in mappings are all correct and the only mappings which need to be updated after a flush are any unmaps (so that paging works) as well as any pending protection changes. This optimization squeezes out up to 3% from a full kernel rebuild time under memory pressure.

Signed-off-by: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
Diffstat (limited to 'arch/um')
-rw-r--r--arch/um/kernel/tlb.c9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 9ca902df243a..8347161c2ae0 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -242,10 +242,11 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
 			(x ? UM_PROT_EXEC : 0));
 		if (hvc->force || pte_newpage(*pte)) {
-			if (pte_present(*pte))
-				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
-					       PAGE_SIZE, prot, hvc);
-			else
+			if (pte_present(*pte)) {
+				if (pte_newpage(*pte))
+					ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
+						       PAGE_SIZE, prot, hvc);
+			} else
 				ret = add_munmap(addr, PAGE_SIZE, hvc);
 		} else if (pte_newprot(*pte))
 			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);