about summary refs log tree commit diff stats
path: root/mm/swap.c
diff options
context:
space:
mode:
authorShaohua Li <shli@kernel.org>2013-04-29 18:08:36 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-04-29 18:54:38 -0400
commit5bc7b8aca942d03bf2716ddcfcb4e0b57e43a1b8 (patch)
treec76049e13755609ecbd4c4066fddd8bbfdd9f650 /mm/swap.c
parent1eec6702a80e04416d528846a5ff2122484d95ec (diff)
mm: thp: add split tail pages to shrink page list in page reclaim
In page reclaim, huge page is split. split_huge_page() adds tail pages to LRU list. Since we are reclaiming a huge page, it's better we reclaim all subpages of the huge page instead of just the head page. This patch adds split tail pages to shrink page list so the tail pages can be reclaimed soon. Before this patch, run a swap workload: thp_fault_alloc 3492 thp_fault_fallback 608 thp_collapse_alloc 6 thp_collapse_alloc_failed 0 thp_split 916 With this patch: thp_fault_alloc 4085 thp_fault_fallback 16 thp_collapse_alloc 90 thp_collapse_alloc_failed 0 thp_split 1272 fallback allocation is reduced a lot. [akpm@linux-foundation.org: fix CONFIG_SWAP=n build] Signed-off-by: Shaohua Li <shli@fusionio.com> Acked-by: Rik van Riel <riel@redhat.com> Acked-by: Minchan Kim <minchan@kernel.org> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Hugh Dickins <hughd@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/swap.c')
-rw-r--r--mm/swap.c11
1 file changed, 8 insertions, 3 deletions
diff --git a/mm/swap.c b/mm/swap.c
index 8a529a01e8fc..acd40bfffa82 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -737,7 +737,7 @@ EXPORT_SYMBOL(__pagevec_release);
737#ifdef CONFIG_TRANSPARENT_HUGEPAGE 737#ifdef CONFIG_TRANSPARENT_HUGEPAGE
738/* used by __split_huge_page_refcount() */ 738/* used by __split_huge_page_refcount() */
739void lru_add_page_tail(struct page *page, struct page *page_tail, 739void lru_add_page_tail(struct page *page, struct page *page_tail,
740 struct lruvec *lruvec) 740 struct lruvec *lruvec, struct list_head *list)
741{ 741{
742 int uninitialized_var(active); 742 int uninitialized_var(active);
743 enum lru_list lru; 743 enum lru_list lru;
@@ -749,7 +749,8 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
749 VM_BUG_ON(NR_CPUS != 1 && 749 VM_BUG_ON(NR_CPUS != 1 &&
750 !spin_is_locked(&lruvec_zone(lruvec)->lru_lock)); 750 !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
751 751
752 SetPageLRU(page_tail); 752 if (!list)
753 SetPageLRU(page_tail);
753 754
754 if (page_evictable(page_tail)) { 755 if (page_evictable(page_tail)) {
755 if (PageActive(page)) { 756 if (PageActive(page)) {
@@ -767,7 +768,11 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
767 768
768 if (likely(PageLRU(page))) 769 if (likely(PageLRU(page)))
769 list_add_tail(&page_tail->lru, &page->lru); 770 list_add_tail(&page_tail->lru, &page->lru);
770 else { 771 else if (list) {
772 /* page reclaim is reclaiming a huge page */
773 get_page(page_tail);
774 list_add_tail(&page_tail->lru, list);
775 } else {
771 struct list_head *list_head; 776 struct list_head *list_head;
772 /* 777 /*
773 * Head page has not yet been counted, as an hpage, 778 * Head page has not yet been counted, as an hpage,