author     Xishi Qiu <qiuxishi@huawei.com>                  2017-05-03 17:52:52 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-05-03 18:52:08 -0400
commit     a6ffdc07847e74cc244c02ab6d0351a4a5d77281
tree       e23f763ce214062030a7cc3fb89f4f31a6cf265a /mm/page_alloc.c
parent     322b8afe4a65906c133102532e63a278775cc5f0
mm: use is_migrate_highatomic() to simplify the code
Introduce two helpers, is_migrate_highatomic() and is_migrate_highatomic_page(), and use them to simplify the code. No functional changes.
[akpm@linux-foundation.org: use static inlines rather than macros, per mhocko]
Link: http://lkml.kernel.org/r/58B94F15.6060606@huawei.com
Signed-off-by: Xishi Qiu <qiuxishi@huawei.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
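
The helper definitions themselves are not visible here, since the diffstat below is limited to mm/page_alloc.c. As a rough sketch only, inferred from the note above (static inlines rather than macros) and from the call-site conversions in the diff, they would look roughly like this; the exact header they live in and the parameter type are assumptions:

/*
 * Assumed sketch only -- the real definitions sit outside mm/page_alloc.c
 * and are not part of this diffstat. The names come from the commit
 * message; the enum migratetype parameter type is an assumption.
 */
static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	/* replaces the open-coded test: mt != MIGRATE_HIGHATOMIC */
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	/* replaces: get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC */
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

With helpers of this shape, each open-coded comparison against MIGRATE_HIGHATOMIC in the diff collapses to a single call, which is why the change is purely mechanical.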
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--   mm/page_alloc.c   14
1 file changed, 6 insertions, 8 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f82beddbd96f..34ac32428de8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2036,8 +2036,8 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
 
 	/* Yoink! */
 	mt = get_pageblock_migratetype(page);
-	if (mt != MIGRATE_HIGHATOMIC &&
-			!is_migrate_isolate(mt) && !is_migrate_cma(mt)) {
+	if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
+	    && !is_migrate_cma(mt)) {
 		zone->nr_reserved_highatomic += pageblock_nr_pages;
 		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
 		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
@@ -2094,8 +2094,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 			 * from highatomic to ac->migratetype. So we should
 			 * adjust the count once.
 			 */
-			if (get_pageblock_migratetype(page) ==
-							MIGRATE_HIGHATOMIC) {
+			if (is_migrate_highatomic_page(page)) {
 				/*
 				 * It should never happen but changes to
 				 * locking could inadvertently allow a per-cpu
@@ -2152,8 +2151,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 
 		page = list_first_entry(&area->free_list[fallback_mt],
 							struct page, lru);
-		if (can_steal &&
-			get_pageblock_migratetype(page) != MIGRATE_HIGHATOMIC)
+		if (can_steal && !is_migrate_highatomic_page(page))
 			steal_suitable_fallback(zone, page, start_migratetype);
 
 		/* Remove the page from the freelists */
@@ -2493,7 +2491,7 @@ void free_hot_cold_page(struct page *page, bool cold)
 	/*
 	 * We only track unmovable, reclaimable and movable on pcp lists.
 	 * Free ISOLATE pages back to the allocator because they are being
-	 * offlined but treat RESERVE as movable pages so we can get those
+	 * offlined but treat HIGHATOMIC as movable pages so we can get those
 	 * areas back if necessary. Otherwise, we may have to free
 	 * excessively into the page allocator
 	 */
@@ -2603,7 +2601,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
 		for (; page < endpage; page += pageblock_nr_pages) {
 			int mt = get_pageblock_migratetype(page);
 			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
-				&& mt != MIGRATE_HIGHATOMIC)
+				&& !is_migrate_highatomic(mt))
 				set_pageblock_migratetype(page,
 							MIGRATE_MOVABLE);
 		}