author     Dan Williams <dan.j.williams@intel.com>         2019-05-14 18:41:32 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-05-14 22:52:48 -0400
commit     b03641af680959df57c275a80ff0dc116627c7ae (patch)
tree       7420434fe3c82ebdd7b05cc59febe49e0e946366 /mm/page_alloc.c
parent     e900a918b0984ec8f2eb150b8477a47b75d17692 (diff)
mm: move buddy list manipulations into helpers
In preparation for runtime randomization of the zone lists, take all
(well, most of) the open-coded list_*() calls in the buddy allocator and put
them in helper functions. Provide a common control point for injecting
additional behavior when freeing pages.
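The diff shown here is limited to mm/page_alloc.c; the helpers themselves are defined in include/linux/mmzone.h, outside this diffstat. A plausible sketch of the six helpers, reconstructed from the call sites in the hunks below (the exact bodies and formatting in the tree may differ):

    /*
     * Sketch of the free-list helpers as reconstructed from the call
     * sites in the hunks below; the canonical definitions added by this
     * commit live in include/linux/mmzone.h and may differ in detail.
     */
    static inline void add_to_free_area(struct page *page, struct free_area *area,
    				    int migratetype)
    {
    	list_add(&page->lru, &area->free_list[migratetype]);
    	area->nr_free++;
    }

    static inline void add_to_free_area_tail(struct page *page,
    					 struct free_area *area,
    					 int migratetype)
    {
    	list_add_tail(&page->lru, &area->free_list[migratetype]);
    	area->nr_free++;
    }

    static inline void move_to_free_area(struct page *page, struct free_area *area,
    				     int migratetype)
    {
    	/* The page stays within this free_area, so nr_free is unchanged. */
    	list_move(&page->lru, &area->free_list[migratetype]);
    }

    static inline struct page *get_page_from_free_area(struct free_area *area,
    						   int migratetype)
    {
    	return list_first_entry_or_null(&area->free_list[migratetype],
    					struct page, lru);
    }

    static inline void del_page_from_free_area(struct page *page,
    					   struct free_area *area)
    {
    	/*
    	 * Replaces the open-coded list_del() + rmv_page_order() +
    	 * nr_free-- sequence removed throughout this patch.
    	 */
    	list_del(&page->lru);
    	__ClearPageBuddy(page);
    	set_page_private(page, 0);
    	area->nr_free--;
    }

    static inline bool free_area_empty(struct free_area *area, int migratetype)
    {
    	return list_empty(&area->free_list[migratetype]);
    }

With every free-list manipulation funneled through these entry points, a later shuffling patch only needs to touch the helpers (for example, choosing head vs. tail insertion at random) rather than every call site.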
[dan.j.williams@intel.com: fix buddy list helpers]
Link: http://lkml.kernel.org/r/155033679702.1773410.13041474192173212653.stgit@dwillia2-desk3.amr.corp.intel.com
[vbabka@suse.cz: remove del_page_from_free_area() migratetype parameter]
Link: http://lkml.kernel.org/r/4672701b-6775-6efd-0797-b6242591419e@suse.cz
Link: http://lkml.kernel.org/r/154899812264.3165233.5219320056406926223.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Robert Elliott <elliott@hpe.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  65
1 file changed, 22 insertions(+), 43 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 548f8f5d3295..b674625762c4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -756,12 +756,6 @@ static inline void set_page_order(struct page *page, unsigned int order)
 	__SetPageBuddy(page);
 }
 
-static inline void rmv_page_order(struct page *page)
-{
-	__ClearPageBuddy(page);
-	set_page_private(page, 0);
-}
-
 /*
  * This function checks whether a page is free && is the buddy
  * we can coalesce a page and its buddy if
@@ -919,13 +913,10 @@ continue_merging:
 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
 		 * merge with it and move up one order.
 		 */
-		if (page_is_guard(buddy)) {
+		if (page_is_guard(buddy))
 			clear_page_guard(zone, buddy, order, migratetype);
-		} else {
-			list_del(&buddy->lru);
-			zone->free_area[order].nr_free--;
-			rmv_page_order(buddy);
-		}
+		else
+			del_page_from_free_area(buddy, &zone->free_area[order]);
 		combined_pfn = buddy_pfn & pfn;
 		page = page + (combined_pfn - pfn);
 		pfn = combined_pfn;
@@ -975,15 +966,13 @@ done_merging:
 			higher_buddy = higher_page + (buddy_pfn - combined_pfn);
 			if (pfn_valid_within(buddy_pfn) &&
 			    page_is_buddy(higher_page, higher_buddy, order + 1)) {
-				list_add_tail(&page->lru,
-					&zone->free_area[order].free_list[migratetype]);
-				goto out;
+				add_to_free_area_tail(page, &zone->free_area[order],
+						      migratetype);
+				return;
 			}
 		}
 
-	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
-out:
-	zone->free_area[order].nr_free++;
+	add_to_free_area(page, &zone->free_area[order], migratetype);
 }
 
 /*
@@ -1974,8 +1963,7 @@ static inline void expand(struct zone *zone, struct page *page,
 		if (set_page_guard(zone, &page[size], high, migratetype))
 			continue;
 
-		list_add(&page[size].lru, &area->free_list[migratetype]);
-		area->nr_free++;
+		add_to_free_area(&page[size], area, migratetype);
 		set_page_order(&page[size], high);
 	}
 }
@@ -2117,13 +2105,10 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 	/* Find a page of the appropriate size in the preferred list */
 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
 		area = &(zone->free_area[current_order]);
-		page = list_first_entry_or_null(&area->free_list[migratetype],
-							struct page, lru);
+		page = get_page_from_free_area(area, migratetype);
 		if (!page)
 			continue;
-		list_del(&page->lru);
-		rmv_page_order(page);
-		area->nr_free--;
+		del_page_from_free_area(page, area);
 		expand(zone, page, order, current_order, area, migratetype);
 		set_pcppage_migratetype(page, migratetype);
 		return page;
@@ -2209,8 +2194,7 @@ static int move_freepages(struct zone *zone,
 		}
 
 		order = page_order(page);
-		list_move(&page->lru,
-			  &zone->free_area[order].free_list[migratetype]);
+		move_to_free_area(page, &zone->free_area[order], migratetype);
 		page += 1 << order;
 		pages_moved += 1 << order;
 	}
@@ -2398,7 +2382,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
 
 single_page:
 	area = &zone->free_area[current_order];
-	list_move(&page->lru, &area->free_list[start_type]);
+	move_to_free_area(page, area, start_type);
 }
 
 /*
@@ -2422,7 +2406,7 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
 		if (fallback_mt == MIGRATE_TYPES)
 			break;
 
-		if (list_empty(&area->free_list[fallback_mt]))
+		if (free_area_empty(area, fallback_mt))
 			continue;
 
 		if (can_steal_fallback(order, migratetype))
@@ -2509,9 +2493,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 		for (order = 0; order < MAX_ORDER; order++) {
 			struct free_area *area = &(zone->free_area[order]);
 
-			page = list_first_entry_or_null(
-					&area->free_list[MIGRATE_HIGHATOMIC],
-					struct page, lru);
+			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
 			if (!page)
 				continue;
 
@@ -2634,8 +2616,7 @@ find_smallest:
 	VM_BUG_ON(current_order == MAX_ORDER);
 
 do_steal:
-	page = list_first_entry(&area->free_list[fallback_mt],
-							struct page, lru);
+	page = get_page_from_free_area(area, fallback_mt);
 
 	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
 								can_steal);
@@ -3072,6 +3053,7 @@ EXPORT_SYMBOL_GPL(split_page);
 
 int __isolate_free_page(struct page *page, unsigned int order)
 {
+	struct free_area *area = &page_zone(page)->free_area[order];
 	unsigned long watermark;
 	struct zone *zone;
 	int mt;
@@ -3096,9 +3078,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	}
 
 	/* Remove page from free list */
-	list_del(&page->lru);
-	zone->free_area[order].nr_free--;
-	rmv_page_order(page);
+
+	del_page_from_free_area(page, area);
 
 	/*
 	 * Set the pageblock if the isolated page is at least half of a
@@ -3395,13 +3376,13 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 			continue;
 
 		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
-			if (!list_empty(&area->free_list[mt]))
+			if (!free_area_empty(area, mt))
 				return true;
 		}
 
 #ifdef CONFIG_CMA
 		if ((alloc_flags & ALLOC_CMA) &&
-		    !list_empty(&area->free_list[MIGRATE_CMA])) {
+		    !free_area_empty(area, MIGRATE_CMA)) {
 			return true;
 		}
 #endif
@@ -5328,7 +5309,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 
 			types[order] = 0;
 			for (type = 0; type < MIGRATE_TYPES; type++) {
-				if (!list_empty(&area->free_list[type]))
+				if (!free_area_empty(area, type))
 					types[order] |= 1 << type;
 			}
 		}
@@ -8501,9 +8482,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 		pr_info("remove from free list %lx %d %lx\n",
 			pfn, 1 << order, end_pfn);
 #endif
-		list_del(&page->lru);
-		rmv_page_order(page);
-		zone->free_area[order].nr_free--;
+		del_page_from_free_area(page, &zone->free_area[order]);
 		for (i = 0; i < (1 << order); i++)
 			SetPageReserved((page+i));
 		pfn += (1 << order);