author    Shaohua Li <shli@kernel.org>    2013-04-29 18:08:36 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-04-29 18:54:38 -0400
commit    5bc7b8aca942d03bf2716ddcfcb4e0b57e43a1b8 (patch)
tree      c76049e13755609ecbd4c4066fddd8bbfdd9f650 /mm
parent    1eec6702a80e04416d528846a5ff2122484d95ec (diff)
mm: thp: add split tail pages to shrink page list in page reclaim
In page reclaim, a huge page is split.  split_huge_page() adds the tail
pages to the LRU list.  Since we are reclaiming a huge page, it's better
to reclaim all subpages of the huge page instead of just the head page.
This patch adds the split tail pages to the shrink page list so the tail
pages can be reclaimed soon.

Before this patch, running a swap workload:

    thp_fault_alloc 3492
    thp_fault_fallback 608
    thp_collapse_alloc 6
    thp_collapse_alloc_failed 0
    thp_split 916

With this patch:

    thp_fault_alloc 4085
    thp_fault_fallback 16
    thp_collapse_alloc 90
    thp_collapse_alloc_failed 0
    thp_split 1272

Fallback allocation is reduced a lot.

[akpm@linux-foundation.org: fix CONFIG_SWAP=n build]
Signed-off-by: Shaohua Li <shli@fusionio.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c  21
-rw-r--r--  mm/swap.c         11
-rw-r--r--  mm/swap_state.c    4
-rw-r--r--  mm/vmscan.c        2
4 files changed, 26 insertions, 12 deletions
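Note: the diffstat above is limited to 'mm', so the companion header change is
not shown. To keep existing callers building, the commit presumably leaves
split_huge_page() as a thin wrapper around the new entry point, along these
lines (a sketch of the include/linux/huge_mm.h side, not part of the diff
shown below):

	extern int split_huge_page_to_list(struct page *page,
					   struct list_head *list);

	static inline int split_huge_page(struct page *page)
	{
		/* NULL list: tail pages go back to the LRU, as before */
		return split_huge_page_to_list(page, NULL);
	}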
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 45eaae030628..2ed1a160a85b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1559,7 +1559,8 @@ static int __split_huge_page_splitting(struct page *page,
 	return ret;
 }
 
-static void __split_huge_page_refcount(struct page *page)
+static void __split_huge_page_refcount(struct page *page,
+				       struct list_head *list)
 {
 	int i;
 	struct zone *zone = page_zone(page);
@@ -1645,7 +1646,7 @@ static void __split_huge_page_refcount(struct page *page)
 		BUG_ON(!PageDirty(page_tail));
 		BUG_ON(!PageSwapBacked(page_tail));
 
-		lru_add_page_tail(page, page_tail, lruvec);
+		lru_add_page_tail(page, page_tail, lruvec, list);
 	}
 	atomic_sub(tail_count, &page->_count);
 	BUG_ON(atomic_read(&page->_count) <= 0);
@@ -1752,7 +1753,8 @@ static int __split_huge_page_map(struct page *page,
 
 /* must be called with anon_vma->root->rwsem held */
 static void __split_huge_page(struct page *page,
-			      struct anon_vma *anon_vma)
+			      struct anon_vma *anon_vma,
+			      struct list_head *list)
 {
 	int mapcount, mapcount2;
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -1783,7 +1785,7 @@ static void __split_huge_page(struct page *page,
 	       mapcount, page_mapcount(page));
 	BUG_ON(mapcount != page_mapcount(page));
 
-	__split_huge_page_refcount(page);
+	__split_huge_page_refcount(page, list);
 
 	mapcount2 = 0;
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
@@ -1798,7 +1800,14 @@ static void __split_huge_page(struct page *page,
 	BUG_ON(mapcount != mapcount2);
 }
 
-int split_huge_page(struct page *page)
+/*
+ * Split a hugepage into normal pages. This doesn't change the position of head
+ * page. If @list is null, tail pages will be added to LRU list, otherwise, to
+ * @list. Both head page and tail pages will inherit mapping, flags, and so on
+ * from the hugepage.
+ * Return 0 if the hugepage is split successfully otherwise return 1.
+ */
+int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
 	struct anon_vma *anon_vma;
 	int ret = 1;
@@ -1823,7 +1832,7 @@ int split_huge_page(struct page *page)
 		goto out_unlock;
 
 	BUG_ON(!PageSwapBacked(page));
-	__split_huge_page(page, anon_vma);
+	__split_huge_page(page, anon_vma, list);
 	count_vm_event(THP_SPLIT);
 
 	BUG_ON(PageCompound(page));
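To illustrate the @list contract documented above, a hypothetical caller that
wants the tail pages for itself rather than on the LRU might look like this
(illustrative only, not part of this patch):

	LIST_HEAD(split_list);

	if (!split_huge_page_to_list(page, &split_list)) {
		/*
		 * The tail pages are now queued on split_list, each
		 * holding an extra reference taken by lru_add_page_tail().
		 */
	}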
diff --git a/mm/swap.c b/mm/swap.c
index 8a529a01e8fc..acd40bfffa82 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -737,7 +737,7 @@ EXPORT_SYMBOL(__pagevec_release);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /* used by __split_huge_page_refcount() */
 void lru_add_page_tail(struct page *page, struct page *page_tail,
-		       struct lruvec *lruvec)
+		       struct lruvec *lruvec, struct list_head *list)
 {
 	int uninitialized_var(active);
 	enum lru_list lru;
@@ -749,7 +749,8 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 	VM_BUG_ON(NR_CPUS != 1 &&
 		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
 
-	SetPageLRU(page_tail);
+	if (!list)
+		SetPageLRU(page_tail);
 
 	if (page_evictable(page_tail)) {
 		if (PageActive(page)) {
@@ -767,7 +768,11 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 
 	if (likely(PageLRU(page)))
 		list_add_tail(&page_tail->lru, &page->lru);
-	else {
+	else if (list) {
+		/* page reclaim is reclaiming a huge page */
+		get_page(page_tail);
+		list_add_tail(&page_tail->lru, list);
+	} else {
 		struct list_head *list_head;
 		/*
 		 * Head page has not yet been counted, as an hpage,
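The get_page() in the new branch deserves a note: a tail page diverted to the
caller's private list never gets PageLRU set and is effectively in the same
state as a page isolated from the LRU, so it needs a reference that page
reclaim will drop when it frees or puts back the page. In summary, the three
placements are (sketch):

	/*
	 * Tail page placement in lru_add_page_tail() after this patch:
	 *  1. head still on LRU             -> chain tail right after head
	 *  2. head off LRU and list != NULL -> get_page(tail) and queue it on
	 *                                      the caller's list (page reclaim)
	 *  3. head off LRU and list == NULL -> add tail to its LRU list
	 */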
diff --git a/mm/swap_state.c b/mm/swap_state.c
index fe43fd5578cf..b3d40dcf3624 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -160,7 +160,7 @@ void __delete_from_swap_cache(struct page *page)
  * Allocate swap space for the page and add the page to the
  * swap cache.  Caller needs to hold the page lock.
  */
-int add_to_swap(struct page *page)
+int add_to_swap(struct page *page, struct list_head *list)
 {
 	swp_entry_t entry;
 	int err;
@@ -173,7 +173,7 @@ int add_to_swap(struct page *page)
 		return 0;
 
 	if (unlikely(PageTransHuge(page)))
-		if (unlikely(split_huge_page(page))) {
+		if (unlikely(split_huge_page_to_list(page, list))) {
 			swapcache_free(entry, NULL);
 			return 0;
 		}
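The "[akpm@linux-foundation.org: fix CONFIG_SWAP=n build]" note in the
changelog refers to the !CONFIG_SWAP stub of add_to_swap() in
include/linux/swap.h, outside this 'mm'-limited diffstat, which presumably
needed the same signature change, roughly:

	static inline int add_to_swap(struct page *page, struct list_head *list)
	{
		return 0;
	}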
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e53e49584cf3..fa6a85378ee4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -781,7 +781,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (PageAnon(page) && !PageSwapCache(page)) {
 			if (!(sc->gfp_mask & __GFP_IO))
 				goto keep_locked;
-			if (!add_to_swap(page))
+			if (!add_to_swap(page, page_list))
 				goto activate_locked;
 			may_enter_fs = 1;
 		}
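Net effect: shrink_page_list() pops one page at a time off page_list in a
while (!list_empty(page_list)) loop, so tail pages appended by add_to_swap()
are picked up later in the same reclaim pass, roughly:

	while (!list_empty(page_list)) {
		...
		page = lru_to_page(page_list);
		list_del(&page->lru);
		...
		if (PageAnon(page) && !PageSwapCache(page)) {
			...
			/*
			 * May split a THP and append its tail pages to
			 * page_list; the loop then visits and swaps out
			 * each tail page in turn.
			 */
			if (!add_to_swap(page, page_list))
				goto activate_locked;
			...
		}
		...
	}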