about summary refs log tree commit diff stats
path: root/mm/huge_memory.c
diff options
context:
space:
mode:
authorShaohua Li <shli@kernel.org>2013-04-29 18:08:36 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-04-29 18:54:38 -0400
commit5bc7b8aca942d03bf2716ddcfcb4e0b57e43a1b8 (patch)
treec76049e13755609ecbd4c4066fddd8bbfdd9f650 /mm/huge_memory.c
parent1eec6702a80e04416d528846a5ff2122484d95ec (diff)
mm: thp: add split tail pages to shrink page list in page reclaim
In page reclaim, huge page is split. split_huge_page() adds tail pages to LRU list. Since we are reclaiming a huge page, it's better we reclaim all subpages of the huge page instead of just the head page. This patch adds split tail pages to shrink page list so the tail pages can be reclaimed soon.

Before this patch, run a swap workload:
thp_fault_alloc 3492
thp_fault_fallback 608
thp_collapse_alloc 6
thp_collapse_alloc_failed 0
thp_split 916

With this patch:
thp_fault_alloc 4085
thp_fault_fallback 16
thp_collapse_alloc 90
thp_collapse_alloc_failed 0
thp_split 1272

fallback allocation is reduced a lot.

[akpm@linux-foundation.org: fix CONFIG_SWAP=n build]
Signed-off-by: Shaohua Li <shli@fusionio.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--mm/huge_memory.c21
1 files changed, 15 insertions, 6 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 45eaae030628..2ed1a160a85b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1559,7 +1559,8 @@ static int __split_huge_page_splitting(struct page *page,
1559 return ret; 1559 return ret;
1560} 1560}
1561 1561
1562static void __split_huge_page_refcount(struct page *page) 1562static void __split_huge_page_refcount(struct page *page,
1563 struct list_head *list)
1563{ 1564{
1564 int i; 1565 int i;
1565 struct zone *zone = page_zone(page); 1566 struct zone *zone = page_zone(page);
@@ -1645,7 +1646,7 @@ static void __split_huge_page_refcount(struct page *page)
1645 BUG_ON(!PageDirty(page_tail)); 1646 BUG_ON(!PageDirty(page_tail));
1646 BUG_ON(!PageSwapBacked(page_tail)); 1647 BUG_ON(!PageSwapBacked(page_tail));
1647 1648
1648 lru_add_page_tail(page, page_tail, lruvec); 1649 lru_add_page_tail(page, page_tail, lruvec, list);
1649 } 1650 }
1650 atomic_sub(tail_count, &page->_count); 1651 atomic_sub(tail_count, &page->_count);
1651 BUG_ON(atomic_read(&page->_count) <= 0); 1652 BUG_ON(atomic_read(&page->_count) <= 0);
@@ -1752,7 +1753,8 @@ static int __split_huge_page_map(struct page *page,
1752 1753
1753/* must be called with anon_vma->root->rwsem held */ 1754/* must be called with anon_vma->root->rwsem held */
1754static void __split_huge_page(struct page *page, 1755static void __split_huge_page(struct page *page,
1755 struct anon_vma *anon_vma) 1756 struct anon_vma *anon_vma,
1757 struct list_head *list)
1756{ 1758{
1757 int mapcount, mapcount2; 1759 int mapcount, mapcount2;
1758 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 1760 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -1783,7 +1785,7 @@ static void __split_huge_page(struct page *page,
1783 mapcount, page_mapcount(page)); 1785 mapcount, page_mapcount(page));
1784 BUG_ON(mapcount != page_mapcount(page)); 1786 BUG_ON(mapcount != page_mapcount(page));
1785 1787
1786 __split_huge_page_refcount(page); 1788 __split_huge_page_refcount(page, list);
1787 1789
1788 mapcount2 = 0; 1790 mapcount2 = 0;
1789 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { 1791 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
@@ -1798,7 +1800,14 @@ static void __split_huge_page(struct page *page,
1798 BUG_ON(mapcount != mapcount2); 1800 BUG_ON(mapcount != mapcount2);
1799} 1801}
1800 1802
1801int split_huge_page(struct page *page) 1803/*
1804 * Split a hugepage into normal pages. This doesn't change the position of head
1805 * page. If @list is null, tail pages will be added to LRU list, otherwise, to
1806 * @list. Both head page and tail pages will inherit mapping, flags, and so on
1807 * from the hugepage.
1808 * Return 0 if the hugepage is split successfully otherwise return 1.
1809 */
1810int split_huge_page_to_list(struct page *page, struct list_head *list)
1802{ 1811{
1803 struct anon_vma *anon_vma; 1812 struct anon_vma *anon_vma;
1804 int ret = 1; 1813 int ret = 1;
@@ -1823,7 +1832,7 @@ int split_huge_page(struct page *page)
1823 goto out_unlock; 1832 goto out_unlock;
1824 1833
1825 BUG_ON(!PageSwapBacked(page)); 1834 BUG_ON(!PageSwapBacked(page));
1826 __split_huge_page(page, anon_vma); 1835 __split_huge_page(page, anon_vma, list);
1827 count_vm_event(THP_SPLIT); 1836 count_vm_event(THP_SPLIT);
1828 1837
1829 BUG_ON(PageCompound(page)); 1838 BUG_ON(PageCompound(page));