author		Aaron Lu <aaron.lu@intel.com>	2018-04-05 19:24:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-06 00:36:26 -0400
commit		0a5f4e5b45625e75db85b4968fc4c232d8091143 (patch)
tree		3a6c3ef467dfa7e2fd8ba50687d8aa3dadfa9717 /mm/page_alloc.c
parent		77ba9062e43c7e4966d9ff3afd87dca86542f86a (diff)
mm/free_pcppages_bulk: do not hold lock when picking pages to free
When freeing a batch of pages from Per-CPU-Pages (PCP) back to buddy, zone->lock is taken and then pages are picked from the PCP's migratetype lists. There is actually no need to do the "pick" part under the lock: these are PCP pages, the only CPU that can touch them is us, and irqs are disabled. Moving that part outside the lock reduces lock hold time and improves performance.

Test with will-it-scale/page_fault1 full load:

kernel      Broadwell(2S)   Skylake(2S)     Broadwell(4S)    Skylake(4S)
v4.16-rc2+  9034215         7971818         13667135         15677465
this patch  9536374 +5.6%   8314710 +4.3%   14070408 +3.0%   16675866 +6.4%

What the test does is: start $nr_cpu processes, each of which repeatedly does the following for 5 minutes:
- mmap 128M of anonymous space;
- write to that space;
- munmap.
The score is the aggregated iteration count.
https://github.com/antonblanchard/will-it-scale/blob/master/tests/page_fault1.c

Link: http://lkml.kernel.org/r/20180301062845.26038-3-aaron.lu@intel.com
Signed-off-by: Aaron Lu <aaron.lu@intel.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Kemi Wang <kemi.wang@intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
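The change splits the drain into two phases: pages are first picked off the PCP lists and collected on a local list with no lock held (only this CPU touches its PCP lists and irqs are off), and zone->lock is then taken only for the actual handoff to buddy. A minimal userspace C sketch of that two-phase pattern follows; it is not the kernel code, and names such as shared_lock, local_batch and release_to_buddy are invented for illustration:

#include <pthread.h>
#include <stdlib.h>

struct item {
	struct item *next;
	int payload;
};

/* Stand-in for zone->lock. */
static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for __free_one_page(): only called with shared_lock held. */
static void release_to_buddy(struct item *it)
{
	free(it);
}

/*
 * Old pattern: take shared_lock first, then pick and release each item
 * under it.  New pattern (below): pick without the lock, release under it,
 * so the lock hold time covers only the work that really needs the lock.
 */
static void drain_batch(struct item **pcp_list, int count)
{
	struct item *local_batch = NULL;

	/* Phase 1: pick items lock-free; only this thread owns pcp_list. */
	while (count-- && *pcp_list) {
		struct item *it = *pcp_list;

		*pcp_list = it->next;
		it->next = local_batch;
		local_batch = it;
	}

	/* Phase 2: take the shared lock only for the actual release. */
	pthread_mutex_lock(&shared_lock);
	while (local_batch) {
		struct item *it = local_batch;

		local_batch = it->next;
		release_to_buddy(it);
	}
	pthread_mutex_unlock(&shared_lock);
}

int main(void)
{
	struct item *pcp_list = NULL;

	/* Build a fake per-CPU free list of 8 items, then drain it. */
	for (int i = 0; i < 8; i++) {
		struct item *it = malloc(sizeof(*it));

		it->payload = i;
		it->next = pcp_list;
		pcp_list = it;
	}
	drain_batch(&pcp_list, 8);
	return 0;
}

The amount of work is unchanged; what shrinks is the part of it that sits inside the zone->lock critical section, which is where the reported 3-6% page_fault1 gain comes from.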
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	39
1 file changed, 23 insertions(+), 16 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 08c195cdf161..e29a6ba050c8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1080,12 +1080,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 	int migratetype = 0;
 	int batch_free = 0;
 	bool isolated_pageblocks;
-
-	spin_lock(&zone->lock);
-	isolated_pageblocks = has_isolate_pageblock(zone);
+	struct page *page, *tmp;
+	LIST_HEAD(head);
 
 	while (count) {
-		struct page *page;
 		struct list_head *list;
 
 		/*
@@ -1107,27 +1105,36 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			batch_free = count;
 
 		do {
-			int mt;	/* migratetype of the to-be-freed page */
-
 			page = list_last_entry(list, struct page, lru);
-			/* must delete as __free_one_page list manipulates */
+			/* must delete to avoid corrupting pcp list */
 			list_del(&page->lru);
 			pcp->count--;
 
-			mt = get_pcppage_migratetype(page);
-			/* MIGRATE_ISOLATE page should not go to pcplists */
-			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
-			/* Pageblock could have been isolated meanwhile */
-			if (unlikely(isolated_pageblocks))
-				mt = get_pageblock_migratetype(page);
-
 			if (bulkfree_pcp_prepare(page))
 				continue;
 
-			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
-			trace_mm_page_pcpu_drain(page, 0, mt);
+			list_add_tail(&page->lru, &head);
 		} while (--count && --batch_free && !list_empty(list));
 	}
 
+	spin_lock(&zone->lock);
+	isolated_pageblocks = has_isolate_pageblock(zone);
+
+	/*
+	 * Use safe version since after __free_one_page(),
+	 * page->lru.next will not point to original list.
+	 */
+	list_for_each_entry_safe(page, tmp, &head, lru) {
+		int mt = get_pcppage_migratetype(page);
+		/* MIGRATE_ISOLATE page should not go to pcplists */
+		VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
+		/* Pageblock could have been isolated meanwhile */
+		if (unlikely(isolated_pageblocks))
+			mt = get_pageblock_migratetype(page);
+
+		__free_one_page(page, page_to_pfn(page), zone, 0, mt);
+		trace_mm_page_pcpu_drain(page, 0, mt);
+	}
 	spin_unlock(&zone->lock);
 }
 