diff options
author | Hugh Dickins <hughd@google.com> | 2012-01-12 20:19:56 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-12 23:13:10 -0500 |
commit | 2bcf887963812c075f80a14e1fad8ec7e1c67acf (patch) | |
tree | 132f11eefe904653307a05b77d16f4c41866e486 /mm/swap.c | |
parent | 90b3feaec8ffb167abd8903bf111605c2f035aa8 (diff) |
mm: take pagevecs off reclaim stack
Replace pagevecs in putback_lru_pages() and move_active_pages_to_lru()
by lists of pages_to_free: then apply Konstantin Khlebnikov's
free_hot_cold_page_list() to them instead of pagevec_release().
Which simplifies the flow (no need to drop and retake lock whenever
pagevec fills up) and reduces stale addresses in stack backtraces
(which often showed through the pagevecs); but more importantly,
removes another 120 bytes from the deepest stacks in page reclaim.
Although I've not recently seen an actual stack overflow here with
a vanilla kernel, move_active_pages_to_lru() has often featured in
deep backtraces.
However, free_hot_cold_page_list() does not handle compound pages
(nor need it: a Transparent HugePage would have been split by the
time it reaches the call in shrink_page_list()), but it is possible
for putback_lru_pages() or move_active_pages_to_lru() to be left
holding the last reference on a THP, so must exclude the unlikely
compound case before putting on pages_to_free.
Remove pagevec_strip(), its work now done in move_active_pages_to_lru().
The pagevec in scan_mapping_unevictable_pages() remains in mm/vmscan.c,
but that is never on the reclaim path, and cannot be replaced by a list.
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/swap.c')
-rw-r--r-- | mm/swap.c | 19 |
1 file changed, 0 insertions(+), 19 deletions(-)
@@ -23,7 +23,6 @@
 #include <linux/init.h>
 #include <linux/export.h>
 #include <linux/mm_inline.h>
-#include <linux/buffer_head.h>	/* for try_to_release_page() */
 #include <linux/percpu_counter.h>
 #include <linux/percpu.h>
 #include <linux/cpu.h>
@@ -730,24 +729,6 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)

 EXPORT_SYMBOL(____pagevec_lru_add);

-/*
- * Try to drop buffers from the pages in a pagevec
- */
-void pagevec_strip(struct pagevec *pvec)
-{
-	int i;
-
-	for (i = 0; i < pagevec_count(pvec); i++) {
-		struct page *page = pvec->pages[i];
-
-		if (page_has_private(page) && trylock_page(page)) {
-			if (page_has_private(page))
-				try_to_release_page(page, 0);
-			unlock_page(page);
-		}
-	}
-}
-
 /**
  * pagevec_lookup - gang pagecache lookup
  * @pvec: Where the resulting pages are placed