Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	62
1 file changed, 37 insertions(+), 25 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2c25de46c58f..2f1118b4dda4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1948,23 +1948,44 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
  * use it's pages as requested migratetype in the future.
  */
 static void steal_suitable_fallback(struct zone *zone, struct page *page,
-							int start_type)
+					int start_type, bool whole_block)
 {
 	unsigned int current_order = page_order(page);
+	struct free_area *area;
 	int pages;
 
+	/*
+	 * This can happen due to races and we want to prevent broken
+	 * highatomic accounting.
+	 */
+	if (is_migrate_highatomic_page(page))
+		goto single_page;
+
 	/* Take ownership for orders >= pageblock_order */
 	if (current_order >= pageblock_order) {
 		change_pageblock_range(page, current_order, start_type);
-		return;
+		goto single_page;
 	}
 
+	/* We are not allowed to try stealing from the whole block */
+	if (!whole_block)
+		goto single_page;
+
 	pages = move_freepages_block(zone, page, start_type);
+	/* moving whole block can fail due to zone boundary conditions */
+	if (!pages)
+		goto single_page;
 
 	/* Claim the whole block if over half of it is free */
 	if (pages >= (1 << (pageblock_order-1)) ||
 			page_group_by_mobility_disabled)
 		set_pageblock_migratetype(page, start_type);
+
+	return;
+
+single_page:
+	area = &zone->free_area[current_order];
+	list_move(&page->lru, &area->free_list[start_type]);
 }
 
 /*
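To make the "over half" threshold in the hunk above concrete, here is a standalone sketch of the claim heuristic, assuming pageblock_order = 9 (a common x86_64 value; the real value is configuration dependent and not part of this diff). A pageblock then spans 1 << 9 = 512 pages, so the stolen block's migratetype is rewritten only when move_freepages_block() managed to move at least 1 << 8 = 256 of them:

#include <stdbool.h>
#include <stdio.h>

#define TOY_PAGEBLOCK_ORDER 9	/* assumption for this sketch, not from the diff */

/* mirrors: pages >= (1 << (pageblock_order-1)) || page_group_by_mobility_disabled */
static bool claim_whole_block(int pages_moved, bool grouping_disabled)
{
	return pages_moved >= (1 << (TOY_PAGEBLOCK_ORDER - 1)) || grouping_disabled;
}

int main(void)
{
	printf("%d\n", claim_whole_block(256, false));	/* 1: claim the whole block */
	printf("%d\n", claim_whole_block(255, false));	/* 0: keep the old migratetype */
	return 0;
}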
@@ -2123,8 +2144,13 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 	return false;
 }
 
-/* Remove an element from the buddy allocator from the fallback list */
-static inline struct page *
+/*
+ * Try finding a free buddy page on the fallback list and put it on the free
+ * list of requested migratetype, possibly along with other pages from the same
+ * block, depending on fragmentation avoidance heuristics. Returns true if
+ * fallback was found so that __rmqueue_smallest() can grab it.
+ */
+static inline bool
 __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 {
 	struct free_area *area;
@@ -2145,32 +2171,17 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 
 		page = list_first_entry(&area->free_list[fallback_mt],
 						struct page, lru);
-		if (can_steal && !is_migrate_highatomic_page(page))
-			steal_suitable_fallback(zone, page, start_migratetype);
 
-		/* Remove the page from the freelists */
-		area->nr_free--;
-		list_del(&page->lru);
-		rmv_page_order(page);
-
-		expand(zone, page, order, current_order, area,
-					start_migratetype);
-		/*
-		 * The pcppage_migratetype may differ from pageblock's
-		 * migratetype depending on the decisions in
-		 * find_suitable_fallback(). This is OK as long as it does not
-		 * differ for MIGRATE_CMA pageblocks. Those can be used as
-		 * fallback only via special __rmqueue_cma_fallback() function
-		 */
-		set_pcppage_migratetype(page, start_migratetype);
+		steal_suitable_fallback(zone, page, start_migratetype,
+								can_steal);
 
 		trace_mm_page_alloc_extfrag(page, order, current_order,
 			start_migratetype, fallback_mt);
 
-		return page;
+		return true;
 	}
 
-	return NULL;
+	return false;
 }
 
 /*
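The block deleted above (the nr_free accounting, list_del(), rmv_page_order(), expand() and set_pcppage_migratetype()) is not lost: after this patch the same work happens inside __rmqueue_smallest() when the caller retries. As a reminder of what that splitting step does, here is a tiny illustrative toy of expand()-style halving (simplified, not the kernel's code): an order-3 page split down to a requested order-0 allocation hands back one page and returns each buddy half to the free lists.

#include <stdio.h>

int main(void)
{
	unsigned int high = 3, low = 0;		/* found an order-3 page, want order 0 */
	unsigned long size = 1UL << high;

	/* halve the page until the requested order is reached, freeing each buddy */
	while (high > low) {
		high--;
		size >>= 1;
		printf("free buddy of %lu page(s) at order %u\n", size, high);
	}
	printf("allocate the remaining %lu page(s)\n", 1UL << low);
	return 0;
}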
@@ -2182,13 +2193,14 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
 {
 	struct page *page;
 
+retry:
 	page = __rmqueue_smallest(zone, order, migratetype);
 	if (unlikely(!page)) {
 		if (migratetype == MIGRATE_MOVABLE)
 			page = __rmqueue_cma_fallback(zone, order);
 
-		if (!page)
-			page = __rmqueue_fallback(zone, order, migratetype);
+		if (!page && __rmqueue_fallback(zone, order, migratetype))
+			goto retry;
 	}
 
 	trace_mm_page_alloc_zone_locked(page, order, migratetype);
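Putting the pieces together: __rmqueue_fallback() now only moves pages onto the requested migratetype's free list and reports success, and the retry makes __rmqueue_smallest() pick, and therefore split, the smallest suitable page rather than the largest stolen one. A compilable toy of that contract, with all identifiers invented for illustration:

#include <stdbool.h>
#include <stdio.h>

static int preferred[8], count;		/* toy free list: orders of free "pages" */
static int stolen[] = { 3, 1 };		/* orders sitting on a fallback list */

static int take_smallest(void)
{
	int i, best = -1, order;

	for (i = 0; i < count; i++)
		if (best < 0 || preferred[i] < preferred[best])
			best = i;
	if (best < 0)
		return -1;		/* list empty: caller may try the fallback */
	order = preferred[best];
	preferred[best] = preferred[--count];	/* unlink from the toy list */
	return order;
}

static bool fallback_refill(void)
{
	int i;

	for (i = 0; i < 2; i++)		/* move the whole "block" over */
		preferred[count++] = stolen[i];
	return true;			/* report that a fallback was found */
}

int main(void)
{
	int order = take_smallest();

	if (order < 0 && fallback_refill())	/* mirrors the new __rmqueue() */
		order = take_smallest();
	printf("splitting the order-%d page\n", order);	/* order-1, not order-3 */
	return 0;
}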