author     Joonsoo Kim <iamjoonsoo.kim@lge.com>            2016-07-26 18:23:40 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-26 19:19:19 -0400
commit     66c64223ad4e7a4a9161fcd9606426d9f57227ca (patch)
tree       6533b432290ce4c708410a98b551cacfbdbd2e02 /mm/compaction.c
parent     3b1d9ca65a80ced8ae737ffb11ae939334a882ca (diff)
mm/compaction: split freepages without holding the zone lock
We don't need to split freepages while holding the zone lock. Doing so
causes more contention on the zone lock, which is not desirable.
[rientjes@google.com: if __isolate_free_page() fails, avoid adding to freelist so we don't call map_pages() with it]
Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1606211447001.43430@chino.kir.corp.google.com
Link: http://lkml.kernel.org/r/1464230275-25791-1-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--   mm/compaction.c   47
1 file changed, 33 insertions, 14 deletions
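
The core of the change can be sketched stand-alone: detach a high-order free page while the zone lock is held, record its order alongside the page, and do the per-page split/map work only after the lock has been dropped. The snippet below is a minimal user-space analogy of that pattern, not kernel code: the pthread mutex stands in for zone->lock, and struct block, isolate_block() and split_block() are hypothetical names invented for illustration.

/*
 * Hypothetical user-space analogy of the patch's locking pattern.
 * Build with: cc -pthread analogy.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct block {
	struct block *next;
	unsigned int order;	/* recorded under the lock, consumed later
				 * (plays the role of page_private()) */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;	/* "zone->lock" */
static struct block *freelist;

/* Detach one block under the lock; the split is deliberately NOT done here. */
static struct block *isolate_block(void)
{
	struct block *b;

	pthread_mutex_lock(&list_lock);
	b = freelist;
	if (b)
		freelist = b->next;
	pthread_mutex_unlock(&list_lock);	/* lock held only for the unlink */
	return b;
}

/* The expensive per-unit work runs with the lock already dropped. */
static void split_block(const struct block *b)
{
	unsigned int nr = 1u << b->order;

	for (unsigned int i = 0; i < nr; i++)
		;	/* stand-in for per-page map/split work */
	printf("split order-%u block into %u units outside the lock\n",
	       b->order, nr);
}

int main(void)
{
	struct block *b;

	/* Seed the shared list with two "order-3" blocks. */
	for (int i = 0; i < 2; i++) {
		b = calloc(1, sizeof(*b));
		if (!b)
			return 1;
		b->order = 3;
		b->next = freelist;
		freelist = b;
	}

	while ((b = isolate_block()) != NULL) {
		split_block(b);		/* outside the critical section */
		free(b);
	}
	return 0;
}

The design point mirrors the patch: the only work done under the lock is taking the page off the free list (plus recording its order via set_page_private()), while split_page()/kernel_map_pages() run later in map_pages() with no lock held.
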
diff --git a/mm/compaction.c b/mm/compaction.c
index 6095055bd70f..3cda95451d93 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -64,13 +64,31 @@ static unsigned long release_freepages(struct list_head *freelist)
 
 static void map_pages(struct list_head *list)
 {
-	struct page *page;
+	unsigned int i, order, nr_pages;
+	struct page *page, *next;
+	LIST_HEAD(tmp_list);
+
+	list_for_each_entry_safe(page, next, list, lru) {
+		list_del(&page->lru);
 
-	list_for_each_entry(page, list, lru) {
-		arch_alloc_page(page, 0);
-		kernel_map_pages(page, 1, 1);
-		kasan_alloc_pages(page, 0);
+		order = page_private(page);
+		nr_pages = 1 << order;
+		set_page_private(page, 0);
+		set_page_refcounted(page);
+
+		arch_alloc_page(page, order);
+		kernel_map_pages(page, nr_pages, 1);
+		kasan_alloc_pages(page, order);
+		if (order)
+			split_page(page, order);
+
+		for (i = 0; i < nr_pages; i++) {
+			list_add(&page->lru, &tmp_list);
+			page++;
+		}
 	}
+
+	list_splice(&tmp_list, list);
 }
 
 static inline bool migrate_async_suitable(int migratetype)
@@ -405,12 +423,13 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	unsigned long flags = 0;
 	bool locked = false;
 	unsigned long blockpfn = *start_pfn;
+	unsigned int order;
 
 	cursor = pfn_to_page(blockpfn);
 
 	/* Isolate free pages. */
 	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
-		int isolated, i;
+		int isolated;
 		struct page *page = cursor;
 
 		/*
@@ -476,17 +495,17 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 			goto isolate_fail;
 		}
 
-		/* Found a free page, break it into order-0 pages */
-		isolated = split_free_page(page);
+		/* Found a free page, will break it into order-0 pages */
+		order = page_order(page);
+		isolated = __isolate_free_page(page, order);
 		if (!isolated)
 			break;
+		set_page_private(page, order);
 
 		total_isolated += isolated;
 		cc->nr_freepages += isolated;
-		for (i = 0; i < isolated; i++) {
-			list_add(&page->lru, freelist);
-			page++;
-		}
+		list_add_tail(&page->lru, freelist);
+
 		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
 			blockpfn += isolated;
 			break;
@@ -605,7 +624,7 @@ isolate_freepages_range(struct compact_control *cc,
 		 */
 	}
 
-	/* split_free_page does not map the pages */
+	/* __isolate_free_page() does not map the pages */
 	map_pages(&freelist);
 
 	if (pfn < end_pfn) {
@@ -1102,7 +1121,7 @@ static void isolate_freepages(struct compact_control *cc)
 		}
 	}
 
-	/* split_free_page does not map the pages */
+	/* __isolate_free_page() does not map the pages */
 	map_pages(freelist);
 
 	/*