diff options
author | Shaohua Li <shaohua.li@intel.com> | 2012-01-12 20:19:18 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-12 23:13:08 -0500 |
commit | 45676885b76237a4c236d26fe20a9b0cfdb2eb22 (patch) | |
tree | e1a5222c4c7b439cb6a27ace0e36e0280f8b7870 | |
parent | f21760b15dcd091e5afd38d0b97197b45f7ef2ea (diff) |
thp: improve order in lru list for split huge page
Put the tail subpages of an isolated hugepage under splitting at the head
of the lru reclaim list, as they should presumably be isolated next too.
Queue the subpages in physical order in the lru for non-isolated
hugepages under splitting. That might provide some theoretical cache
benefit to the buddy allocator later.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | mm/huge_memory.c | 5 | ||||
-rw-r--r-- | mm/swap.c | 2 |
2 files changed, 3 insertions, 4 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 5a595554bd8c..76cc3f7dd4f0 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1228,7 +1228,6 @@ static int __split_huge_page_splitting(struct page *page, | |||
1228 | static void __split_huge_page_refcount(struct page *page) | 1228 | static void __split_huge_page_refcount(struct page *page) |
1229 | { | 1229 | { |
1230 | int i; | 1230 | int i; |
1231 | unsigned long head_index = page->index; | ||
1232 | struct zone *zone = page_zone(page); | 1231 | struct zone *zone = page_zone(page); |
1233 | int zonestat; | 1232 | int zonestat; |
1234 | int tail_count = 0; | 1233 | int tail_count = 0; |
@@ -1239,7 +1238,7 @@ static void __split_huge_page_refcount(struct page *page) | |||
1239 | /* complete memcg works before add pages to LRU */ | 1238 | /* complete memcg works before add pages to LRU */ |
1240 | mem_cgroup_split_huge_fixup(page); | 1239 | mem_cgroup_split_huge_fixup(page); |
1241 | 1240 | ||
1242 | for (i = 1; i < HPAGE_PMD_NR; i++) { | 1241 | for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { |
1243 | struct page *page_tail = page + i; | 1242 | struct page *page_tail = page + i; |
1244 | 1243 | ||
1245 | /* tail_page->_mapcount cannot change */ | 1244 | /* tail_page->_mapcount cannot change */ |
@@ -1302,7 +1301,7 @@ static void __split_huge_page_refcount(struct page *page) | |||
1302 | BUG_ON(page_tail->mapping); | 1301 | BUG_ON(page_tail->mapping); |
1303 | page_tail->mapping = page->mapping; | 1302 | page_tail->mapping = page->mapping; |
1304 | 1303 | ||
1305 | page_tail->index = ++head_index; | 1304 | page_tail->index = page->index + i; |
1306 | 1305 | ||
1307 | BUG_ON(!PageAnon(page_tail)); | 1306 | BUG_ON(!PageAnon(page_tail)); |
1308 | BUG_ON(!PageUptodate(page_tail)); | 1307 | BUG_ON(!PageUptodate(page_tail)); |
@@ -681,7 +681,7 @@ void lru_add_page_tail(struct zone* zone, | |||
681 | if (likely(PageLRU(page))) | 681 | if (likely(PageLRU(page))) |
682 | list_add(&page_tail->lru, page->lru.prev); | 682 | list_add(&page_tail->lru, page->lru.prev); |
683 | else | 683 | else |
684 | list_add(&page_tail->lru, &lruvec->lists[lru]); | 684 | list_add(&page_tail->lru, lruvec->lists[lru].prev); |
685 | __mod_zone_page_state(zone, NR_LRU_BASE + lru, | 685 | __mod_zone_page_state(zone, NR_LRU_BASE + lru, |
686 | hpage_nr_pages(page_tail)); | 686 | hpage_nr_pages(page_tail)); |
687 | } else { | 687 | } else { |