path: root/mm
author	Vlastimil Babka <vbabka@suse.cz>	2017-05-08 18:54:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-08 20:15:10 -0400
commit	02aa0cdd72483c6dd436ed24d1000f86e0038d28 (patch)
tree	0cdf8eec0c5f69b59d7284519ae6b44aab6d6aed /mm
parent	3bc48f96cf11ce8699e419d5e47ae0d456403274 (diff)
mm, page_alloc: count movable pages when stealing from pageblock
When stealing pages from a pageblock of a different migratetype, we count how many free pages were stolen, and change the pageblock's migratetype if more than half of the pageblock was free. This might be too conservative, as there might be other pages that are not free, but were allocated with the same migratetype as our allocation requested.

While we cannot determine the migratetype of allocated pages precisely (at least without the page_owner functionality enabled), we can count pages that compaction would try to isolate for migration - those are either on LRU or __PageMovable(). The rest can be assumed to be MIGRATE_RECLAIMABLE or MIGRATE_UNMOVABLE, which we cannot easily distinguish. This counting can be done as part of free page stealing with little additional overhead.

The page stealing code is changed so that it considers free pages plus pages of the "good" migratetype for the decision whether to change the pageblock's migratetype. The result should be a more accurate migratetype of pageblocks with respect to the actual pages in the pageblocks when stealing from semi-occupied pageblocks. This should help the efficiency of page grouping by mobility.

In testing based on a 4.9 kernel with stress-highalloc from mmtests configured for order-4 GFP_KERNEL allocations, this patch has reduced the number of unmovable allocations falling back to movable pageblocks by 47%. The number of movable allocations falling back to other pageblocks is increased by 55%, but these events don't cause permanent fragmentation, so the tradeoff should be positive. Later patches also offset the movable fallback increase to some extent.

[akpm@linux-foundation.org: merge fix]
Link: http://lkml.kernel.org/r/20170307131545.28577-5-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
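As an illustration of the claim heuristic described above, the following standalone sketch (plain userspace C, not the kernel code itself) mirrors the decision taken after this patch: a pageblock is claimed for the requesting migratetype when free pages plus already-allocated pages of a compatible ("alike") migratetype cover at least half the block. PAGEBLOCK_NR_PAGES, the reduced migratetype enum, should_claim_block() and the page counts in main() are simplified, made-up stand-ins for the kernel's pageblock_nr_pages, MIGRATE_* constants and the in-kernel logic.

/*
 * Sketch of the pageblock claim decision, under the assumptions above.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512  /* e.g. an order-9 pageblock with 4K pages */

enum migratetype { UNMOVABLE, MOVABLE, RECLAIMABLE };

static bool should_claim_block(enum migratetype start_type,
                               enum migratetype old_block_type,
                               int free_pages, int movable_pages)
{
        int alike_pages;

        if (start_type == MOVABLE) {
                /* Movable request: the counted LRU/__PageMovable pages are alike. */
                alike_pages = movable_pages;
        } else if (old_block_type == MOVABLE) {
                /* Pages that are neither free nor movable are assumed alike. */
                alike_pages = PAGEBLOCK_NR_PAGES - (free_pages + movable_pages);
        } else {
                /* UNMOVABLE vs. RECLAIMABLE cannot be told apart: be conservative. */
                alike_pages = 0;
        }

        /* Claim when free + alike pages reach half of the pageblock. */
        return free_pages + alike_pages >= PAGEBLOCK_NR_PAGES / 2;
}

int main(void)
{
        /* Unmovable allocation stealing from a mostly-allocated movable block. */
        printf("claim: %d\n", should_claim_block(UNMOVABLE, MOVABLE, 100, 120));
        /* Movable allocation stealing from an unmovable block with few movable pages. */
        printf("claim: %d\n", should_claim_block(MOVABLE, UNMOVABLE, 100, 20));
        return 0;
}

In the first example the non-free, non-movable pages count toward the threshold, so the block is claimed where the old free-pages-only check (100 free out of 512) would have left its migratetype unchanged.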
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	74
-rw-r--r--	mm/page_isolation.c	5
2 files changed, 62 insertions, 17 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2f1118b4dda4..d90792addeb9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1832,9 +1832,9 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
  * Note that start_page and end_pages are not aligned on a pageblock
  * boundary. If alignment is required, use move_freepages_block()
  */
-int move_freepages(struct zone *zone,
+static int move_freepages(struct zone *zone,
 			  struct page *start_page, struct page *end_page,
-			  int migratetype)
+			  int migratetype, int *num_movable)
 {
 	struct page *page;
 	unsigned int order;
@@ -1851,6 +1851,9 @@ int move_freepages(struct zone *zone,
 	VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
 #endif
 
+	if (num_movable)
+		*num_movable = 0;
+
 	for (page = start_page; page <= end_page;) {
 		if (!pfn_valid_within(page_to_pfn(page))) {
 			page++;
@@ -1861,6 +1864,15 @@ int move_freepages(struct zone *zone,
 		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
 
 		if (!PageBuddy(page)) {
+			/*
+			 * We assume that pages that could be isolated for
+			 * migration are movable. But we don't actually try
+			 * isolating, as that would be expensive.
+			 */
+			if (num_movable &&
+					(PageLRU(page) || __PageMovable(page)))
+				(*num_movable)++;
+
 			page++;
 			continue;
 		}
@@ -1876,7 +1888,7 @@ int move_freepages(struct zone *zone,
 }
 
 int move_freepages_block(struct zone *zone, struct page *page,
-				int migratetype)
+				int migratetype, int *num_movable)
 {
 	unsigned long start_pfn, end_pfn;
 	struct page *start_page, *end_page;
@@ -1893,7 +1905,8 @@ int move_freepages_block(struct zone *zone, struct page *page,
 	if (!zone_spans_pfn(zone, end_pfn))
 		return 0;
 
-	return move_freepages(zone, start_page, end_page, migratetype);
+	return move_freepages(zone, start_page, end_page, migratetype,
+								num_movable);
 }
 
 static void change_pageblock_range(struct page *pageblock_page,
@@ -1943,22 +1956,26 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
 /*
  * This function implements actual steal behaviour. If order is large enough,
  * we can steal whole pageblock. If not, we first move freepages in this
- * pageblock and check whether half of pages are moved or not. If half of
- * pages are moved, we can change migratetype of pageblock and permanently
- * use it's pages as requested migratetype in the future.
+ * pageblock to our migratetype and determine how many already-allocated pages
+ * are there in the pageblock with a compatible migratetype. If at least half
+ * of pages are free or compatible, we can change migratetype of the pageblock
+ * itself, so pages freed in the future will be put on the correct free list.
  */
 static void steal_suitable_fallback(struct zone *zone, struct page *page,
 					int start_type, bool whole_block)
 {
 	unsigned int current_order = page_order(page);
 	struct free_area *area;
-	int pages;
+	int free_pages, movable_pages, alike_pages;
+	int old_block_type;
+
+	old_block_type = get_pageblock_migratetype(page);
 
 	/*
 	 * This can happen due to races and we want to prevent broken
 	 * highatomic accounting.
 	 */
-	if (is_migrate_highatomic_page(page))
+	if (is_migrate_highatomic(old_block_type))
 		goto single_page;
 
 	/* Take ownership for orders >= pageblock_order */
@@ -1971,13 +1988,39 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
 	if (!whole_block)
 		goto single_page;
 
-	pages = move_freepages_block(zone, page, start_type);
+	free_pages = move_freepages_block(zone, page, start_type,
+						&movable_pages);
+	/*
+	 * Determine how many pages are compatible with our allocation.
+	 * For movable allocation, it's the number of movable pages which
+	 * we just obtained. For other types it's a bit more tricky.
+	 */
+	if (start_type == MIGRATE_MOVABLE) {
+		alike_pages = movable_pages;
+	} else {
+		/*
+		 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
+		 * to MOVABLE pageblock, consider all non-movable pages as
+		 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
+		 * vice versa, be conservative since we can't distinguish the
+		 * exact migratetype of non-movable pages.
+		 */
+		if (old_block_type == MIGRATE_MOVABLE)
+			alike_pages = pageblock_nr_pages
+						- (free_pages + movable_pages);
+		else
+			alike_pages = 0;
+	}
+
 	/* moving whole block can fail due to zone boundary conditions */
-	if (!pages)
+	if (!free_pages)
 		goto single_page;
 
-	/* Claim the whole block if over half of it is free */
-	if (pages >= (1 << (pageblock_order-1)) ||
+	/*
+	 * If a sufficient number of pages in the block are either free or of
+	 * comparable migratability as our allocation, claim the whole block.
+	 */
+	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
 			page_group_by_mobility_disabled)
 		set_pageblock_migratetype(page, start_type);
 
@@ -2055,7 +2098,7 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
 	    && !is_migrate_cma(mt)) {
 		zone->nr_reserved_highatomic += pageblock_nr_pages;
 		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
-		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
+		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
 	}
 
 out_unlock:
@@ -2132,7 +2175,8 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 			 * may increase.
 			 */
 			set_pageblock_migratetype(page, ac->migratetype);
-			ret = move_freepages_block(zone, page, ac->migratetype);
+			ret = move_freepages_block(zone, page, ac->migratetype,
+									NULL);
 			if (ret) {
 				spin_unlock_irqrestore(&zone->lock, flags);
 				return ret;
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 7927bbb54a4e..5092e4ef00c8 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -66,7 +66,8 @@ out:
 
 	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
 	zone->nr_isolate_pageblock++;
-	nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
+	nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
+									NULL);
 
 	__mod_zone_freepage_state(zone, -nr_pages, migratetype);
 }
@@ -120,7 +121,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 	 * pageblock scanning for freepage moving.
 	 */
 	if (!isolated_page) {
-		nr_pages = move_freepages_block(zone, page, migratetype);
+		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
 		__mod_zone_freepage_state(zone, nr_pages, migratetype);
 	}
 	set_pageblock_migratetype(page, migratetype);