 mm/vmalloc.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e5e9e1fcac01..072c8e0df90a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -662,23 +662,27 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 	struct llist_node *valist;
 	struct vmap_area *va;
 	struct vmap_area *n_va;
-	bool do_free = false;
+	int resched_threshold;
 
 	lockdep_assert_held(&vmap_purge_lock);
 
 	valist = llist_del_all(&vmap_purge_list);
+	if (unlikely(valist == NULL))
+		return false;
+
+	/*
+	 * TODO: to calculate a flush range without looping.
+	 * The list can be up to lazy_max_pages() elements.
+	 */
 	llist_for_each_entry(va, valist, purge_list) {
 		if (va->va_start < start)
 			start = va->va_start;
 		if (va->va_end > end)
 			end = va->va_end;
-		do_free = true;
 	}
 
-	if (!do_free)
-		return false;
-
 	flush_tlb_kernel_range(start, end);
+	resched_threshold = (int) lazy_max_pages() << 1;
 
 	spin_lock(&vmap_area_lock);
 	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
@@ -686,7 +690,9 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 
 		__free_vmap_area(va);
 		atomic_sub(nr, &vmap_lazy_nr);
-		cond_resched_lock(&vmap_area_lock);
+
+		if (atomic_read(&vmap_lazy_nr) < resched_threshold)
+			cond_resched_lock(&vmap_area_lock);
 	}
 	spin_unlock(&vmap_area_lock);
 	return true;
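
Note on the second hunk: the purge loop no longer offers to reschedule on every freed area. It now skips the voluntary reschedule while the lazy backlog is still at or above resched_threshold (twice lazy_max_pages()), and only calls cond_resched_lock() once the backlog has dropped below it. The sketch below is a minimal userspace analogue of that gating pattern, not kernel code: backlog, BACKLOG_MAX, RESCHED_THRESHOLD, drain_one() and purge_backlog() are hypothetical stand-ins for vmap_lazy_nr, lazy_max_pages(), the threshold, __free_vmap_area() and the purge loop, with a pthread mutex and sched_yield() standing in loosely for vmap_area_lock and cond_resched_lock().

/*
 * Userspace sketch of the threshold-gated reschedule pattern.
 * All names here are illustrative, not the kernel's.
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define BACKLOG_MAX        1024                 /* stand-in for lazy_max_pages() */
#define RESCHED_THRESHOLD  (BACKLOG_MAX << 1)   /* mirrors lazy_max_pages() << 1 */

static pthread_mutex_t backlog_lock = PTHREAD_MUTEX_INITIALIZER;
static long backlog = 3 * BACKLOG_MAX;          /* pretend a large backlog built up */

static void drain_one(void)
{
	backlog--;                              /* stand-in for __free_vmap_area() */
}

static void purge_backlog(void)
{
	pthread_mutex_lock(&backlog_lock);
	while (backlog > 0) {
		drain_one();

		/*
		 * As in the patched loop: only drop the lock and yield
		 * once the backlog is below the threshold. Above it,
		 * keep draining without interruption.
		 */
		if (backlog < RESCHED_THRESHOLD) {
			pthread_mutex_unlock(&backlog_lock);
			sched_yield();          /* rough analogue of cond_resched_lock() */
			pthread_mutex_lock(&backlog_lock);
		}
	}
	pthread_mutex_unlock(&backlog_lock);
}

int main(void)
{
	purge_backlog();
	printf("backlog drained: %ld remaining\n", backlog);
	return 0;
}

Under this scheme a large backlog is drained in one uninterrupted critical section, and the lock holder only starts yielding once most of the pressure is gone; the trade-off is a longer lock hold time while the count stays above the threshold.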