author     Mel Gorman <mel@csn.ul.ie>                          2010-08-09 20:19:31 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>      2010-08-09 23:45:00 -0400
commit     abe4c3b50c3f25cb1baf56036024860f12f96015 (patch)
tree       20ac47ac168b30f1dde6773d103ed13432802049
parent     666356297ec4e9e6594c6008803f2b1403ff7950 (diff)
vmscan: set up pagevec as late as possible in shrink_page_list()
shrink_page_list() sets up a pagevec to release pages as they are freed.
The pagevec consumes a significant amount of stack. This patch instead
adds pages to be freed to a linked list which is then freed en masse at
the end. This avoids using stack in the main path that potentially calls
writepage().
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michael Rubin <mrubin@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--   mm/vmscan.c   36
1 file changed, 28 insertions(+), 8 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 12b692164bcc..512f4630ba8c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -622,6 +622,24 @@ static enum page_references page_check_references(struct page *page,
 	return PAGEREF_RECLAIM;
 }
 
+static noinline_for_stack void free_page_list(struct list_head *free_pages)
+{
+	struct pagevec freed_pvec;
+	struct page *page, *tmp;
+
+	pagevec_init(&freed_pvec, 1);
+
+	list_for_each_entry_safe(page, tmp, free_pages, lru) {
+		list_del(&page->lru);
+		if (!pagevec_add(&freed_pvec, page)) {
+			__pagevec_free(&freed_pvec);
+			pagevec_reinit(&freed_pvec);
+		}
+	}
+
+	pagevec_free(&freed_pvec);
+}
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -630,13 +648,12 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 					enum pageout_io sync_writeback)
 {
 	LIST_HEAD(ret_pages);
-	struct pagevec freed_pvec;
+	LIST_HEAD(free_pages);
 	int pgactivate = 0;
 	unsigned long nr_reclaimed = 0;
 
 	cond_resched();
 
-	pagevec_init(&freed_pvec, 1);
 	while (!list_empty(page_list)) {
 		enum page_references references;
 		struct address_space *mapping;
@@ -811,10 +828,12 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		__clear_page_locked(page);
 free_it:
 		nr_reclaimed++;
-		if (!pagevec_add(&freed_pvec, page)) {
-			__pagevec_free(&freed_pvec);
-			pagevec_reinit(&freed_pvec);
-		}
+
+		/*
+		 * Is there need to periodically free_page_list? It would
+		 * appear not as the counts should be low
+		 */
+		list_add(&page->lru, &free_pages);
 		continue;
 
 cull_mlocked:
@@ -837,9 +856,10 @@ keep:
 		list_add(&page->lru, &ret_pages);
 		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
 	}
+
+	free_page_list(&free_pages);
+
 	list_splice(&ret_pages, page_list);
-	if (pagevec_count(&freed_pvec))
-		__pagevec_free(&freed_pvec);
 	count_vm_events(PGACTIVATE, pgactivate);
 	return nr_reclaimed;
 }
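
For readers unfamiliar with the pattern, below is a minimal user-space sketch of
the idea behind this change: rather than batching releases through a fixed-size
structure on the hot path's stack, candidates for freeing are moved onto a local
linked list and released en masse once the main walk is done. The names used here
(struct item, reclaim_items(), free_item_list()) are illustrative stand-ins, not
the kernel's list_head or pagevec APIs.

/*
 * User-space sketch of the deferred-free pattern: collect items to be
 * freed on a local list during the walk, free them all at the end.
 */
#include <stdio.h>
#include <stdlib.h>

struct item {
        int id;
        struct item *next;
};

/* Release every item that was deferred onto the free list. */
static void free_item_list(struct item *head)
{
        while (head) {
                struct item *next = head->next;

                free(head);
                head = next;
        }
}

/*
 * Walk the input list. Items selected for "reclaim" (even ids here) are
 * moved to a local free list instead of being released one by one; kept
 * items are returned through *kept.
 */
static struct item *reclaim_items(struct item *head, struct item **kept)
{
        struct item *free_list = NULL;

        *kept = NULL;
        while (head) {
                struct item *next = head->next;

                if (head->id % 2 == 0) {
                        /* Defer the free; nothing bulky lives on this stack. */
                        head->next = free_list;
                        free_list = head;
                } else {
                        head->next = *kept;
                        *kept = head;
                }
                head = next;
        }
        return free_list;
}

int main(void)
{
        struct item *head = NULL, *kept = NULL;
        int i;

        for (i = 0; i < 10; i++) {
                struct item *it = malloc(sizeof(*it));

                if (!it)
                        break;
                it->id = i;
                it->next = head;
                head = it;
        }

        /* Free the reclaimed items en masse, after the main walk. */
        free_item_list(reclaim_items(head, &kept));

        for (head = kept; head; head = head->next)
                printf("kept %d\n", head->id);
        free_item_list(kept);
        return 0;
}

In the patch itself the same role is played by the on-stack LIST_HEAD(free_pages)
in shrink_page_list() and the noinline_for_stack free_page_list() helper, which
still batches through a pagevec but does so outside the path that may call
writepage().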