about summary refs log tree commit diff stats
path: root/mm/vmalloc.c
diff options
context:
space:
mode:
author	Joel Fernandes <joelaf@google.com>	2016-12-12 19:44:26 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-12 21:55:08 -0500
commit	763b218ddfaf56761c19923beb7e16656f66ec62 (patch)
tree	d20c1ca842ce466541b1b98ed0fc5589f51a0897 /mm/vmalloc.c
parent	f9e09977671b618aeb25ddc0d4c9a84d5b5cde9d (diff)
mm: add preempt points into __purge_vmap_area_lazy()
Use cond_resched_lock to avoid holding the vmap_area_lock for a
potentially long time and thus creating bad latencies for various
workloads.

[hch: split from a larger patch by Joel, wrote the crappy changelog]

Link: http://lkml.kernel.org/r/1479474236-4139-11-git-send-email-hch@lst.de
Signed-off-by: Joel Fernandes <joelaf@google.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Jisheng Zhang <jszhang@marvell.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: John Dias <joaodias@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d3c1f5ee48b4..a5584384eabc 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -628,7 +628,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 	struct llist_node *valist;
 	struct vmap_area *va;
 	struct vmap_area *n_va;
-	int nr = 0;
+	bool do_free = false;
 
 	lockdep_assert_held(&vmap_purge_lock);
 
@@ -638,18 +638,22 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 			start = va->va_start;
 		if (va->va_end > end)
 			end = va->va_end;
-		nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
+		do_free = true;
 	}
 
-	if (!nr)
+	if (!do_free)
 		return false;
 
-	atomic_sub(nr, &vmap_lazy_nr);
 	flush_tlb_kernel_range(start, end);
 
 	spin_lock(&vmap_area_lock);
-	llist_for_each_entry_safe(va, n_va, valist, purge_list)
+	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
+		int nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
+
 		__free_vmap_area(va);
+		atomic_sub(nr, &vmap_lazy_nr);
+		cond_resched_lock(&vmap_area_lock);
+	}
 	spin_unlock(&vmap_area_lock);
 	return true;
 }