author	Uladzislau Rezki (Sony) <urezki@gmail.com>	2019-05-14 18:41:22 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-05-14 22:52:48 -0400
commit	68571be99f323c3c3db62a8513a43380ccefe97c (patch)
tree	65874a16c02ef477c3b213ae821c627f64c3cfb3 /mm/vmalloc.c
parent	df5ba5be7425e1df296d40c5f37a39d98ec666a2 (diff)
mm/vmalloc.c: add priority threshold to __purge_vmap_area_lazy()
Commit 763b218ddfaf ("mm: add preempt points into __purge_vmap_area_lazy()") introduced some preempt points, one of which makes an allocation more prioritized than lazy freeing of vmap areas.

Prioritizing an allocation over freeing does not work well all the time; it should rather be a compromise:

1) The number of lazy pages directly influences the busy list length and
   thus operations like allocation, lookup, unmap, remove, etc.

2) Under heavy stress of the vmalloc subsystem, I ran into a situation where
   memory usage kept increasing until hitting the out_of_memory -> panic
   state, because the logic that frees vmap areas in the
   __purge_vmap_area_lazy() function was completely blocked.

Establish a threshold past which freeing is prioritized back over allocation, creating a balance between the two.

Using the vmalloc test driver in "stress mode", i.e. when all available test cases are run simultaneously on all online CPUs, applying pressure on the vmalloc subsystem, my HiKey 960 board runs out of memory because the __purge_vmap_area_lazy() logic simply is not able to free pages in time.

How I run it:

1) Build your kernel with CONFIG_TEST_VMALLOC=m
2) ./tools/testing/selftests/vm/test_vmalloc.sh stress

During this test, "vmap_lazy_nr" pages go far beyond the acceptable lazy_max_pages() threshold, which leads to an enormous busy list size and other problems, including longer allocation times.

Link: http://lkml.kernel.org/r/20190124115648.9433-3-urezki@gmail.com
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Thomas Garnier <thgarnie@google.com>
Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sonymobile.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Tejun Heo <tj@kernel.org>
Cc: Joel Fernandes <joel@joelfernandes.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
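As an aside for readers skimming past the diff below, here is a minimal standalone sketch of the threshold idea described above: the purge loop keeps yielding between freed areas only while the lazy-page backlog stays below twice lazy_max_pages(). This is plain userspace C for illustration, not kernel code; the may_resched() helper and the stand-in value returned by the local lazy_max_pages() are assumptions made for the example (the real kernel derives that value from system parameters).

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for the kernel's lazy_max_pages(); the value is arbitrary here. */
	static unsigned long lazy_max_pages(void)
	{
		return 32UL * 1024;
	}

	/* Decide whether the purge loop may yield after freeing one vmap area. */
	static bool may_resched(unsigned long lazy_pages)
	{
		unsigned long resched_threshold = lazy_max_pages() << 1;

		/*
		 * Below the threshold, allocations keep priority, so yielding
		 * is fine.  At or above it, keep freeing without yielding so
		 * the backlog can drain.
		 */
		return lazy_pages < resched_threshold;
	}

	int main(void)
	{
		printf("backlog 10000 pages  -> may yield: %d\n", may_resched(10000));
		printf("backlog 200000 pages -> may yield: %d\n", may_resched(200000));
		return 0;
	}

With the stand-in threshold of 65536 pages, the first call reports that yielding is allowed and the second reports that freeing should continue uninterrupted, which is the balance the patch establishes.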
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	18
1 file changed, 12 insertions, 6 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e5e9e1fcac01..072c8e0df90a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -662,23 +662,27 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 	struct llist_node *valist;
 	struct vmap_area *va;
 	struct vmap_area *n_va;
-	bool do_free = false;
+	int resched_threshold;
 
 	lockdep_assert_held(&vmap_purge_lock);
 
 	valist = llist_del_all(&vmap_purge_list);
+	if (unlikely(valist == NULL))
+		return false;
+
+	/*
+	 * TODO: to calculate a flush range without looping.
+	 * The list can be up to lazy_max_pages() elements.
+	 */
 	llist_for_each_entry(va, valist, purge_list) {
 		if (va->va_start < start)
 			start = va->va_start;
 		if (va->va_end > end)
 			end = va->va_end;
-		do_free = true;
 	}
 
-	if (!do_free)
-		return false;
-
 	flush_tlb_kernel_range(start, end);
+	resched_threshold = (int) lazy_max_pages() << 1;
 
 	spin_lock(&vmap_area_lock);
 	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
@@ -686,7 +690,9 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 
 		__free_vmap_area(va);
 		atomic_sub(nr, &vmap_lazy_nr);
-		cond_resched_lock(&vmap_area_lock);
+
+		if (atomic_read(&vmap_lazy_nr) < resched_threshold)
+			cond_resched_lock(&vmap_area_lock);
 	}
 	spin_unlock(&vmap_area_lock);
 	return true;