author	Mel Gorman <mgorman@techsingularity.net>	2017-04-20 17:37:43 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-04-20 18:30:18 -0400
commit	d34b0733b452ca3ef1dee36436dab08b8aa6a85c
tree	be8888f6aa5fd72319414f5fae54bfebcb418387 /mm/page_alloc.c
parent	f61143c45077df4fa78e2f1ba455a00bbe1d5b8c
Revert "mm, page_alloc: only use per-cpu allocator for irq-safe requests"
This reverts commit 374ad05ab64.

While the patch worked great for userspace allocations, the fact that
softirq loses the per-cpu allocator caused problems. It needs to be
redone, taking into account that a separate list is needed for
hard/soft IRQs, or alternatively finding a cheap way of detecting
reentry due to an interrupt. Both are possible but sufficiently tricky
that it shouldn't be rushed.

Jesper had one method for allowing softirqs but reported that the cost
was high enough that it performed similarly to a plain revert. His
figures for netperf TCP_STREAM were as follows:

Baseline v4.10.0   : 60316 Mbit/s
Current 4.11.0-rc6 : 47491 Mbit/s
Jesper's patch     : 60662 Mbit/s
This patch         : 60106 Mbit/s

As this is a regression, I wish to revert to the noirq allocator for
now and go back to the drawing board.

Link: http://lkml.kernel.org/r/20170415145350.ixy7vtrzdzve57mh@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reported-by: Tariq Toukan <ttoukan.linux@gmail.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
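The core issue behind the revert is that preempt_disable() only excludes
other tasks from the CPU; it does not stop a hardware or soft IRQ on the
same CPU from reentering the per-cpu allocator in the middle of a list
update. The reverted code guarded against hard IRQs with an
in_interrupt() fallback, but that fallback also cost softirq (e.g.
network receive) the fast path. Below is a minimal userspace sketch of
the lost-update hazard; all names in it (pcp_list, pcp_count, pcp_free)
are hypothetical illustrations, not the kernel's actual data structures.

/*
 * Illustrative sketch only -- not kernel code. It shows why
 * preempt_disable() alone cannot protect a per-cpu free list:
 * disabling preemption keeps other tasks off this CPU, but an
 * interrupt can still fire and reenter the same code.
 */
#include <stdio.h>

static int pcp_list[8];	/* stand-in for a per-cpu page list */
static int pcp_count;

static void pcp_free(int page)
{
	/* preempt_disable();  -- excludes other tasks, not IRQs */
	int idx = pcp_count;		/* (1) read the list index */
	/*
	 * If an IRQ or softirq fires here and also frees a page, it
	 * reads the same idx; both stores then land in one slot and
	 * one freed page is lost. Disabling interrupts closes this
	 * window; disabling preemption does not.
	 */
	pcp_list[idx] = page;		/* (2) store the freed page */
	pcp_count = idx + 1;		/* (3) publish the new count */
	/* preempt_enable(); */
}

int main(void)
{
	pcp_free(42);
	printf("pcp_count = %d\n", pcp_count);
	return 0;
}

Re-disabling interrupts with local_irq_save()/local_irq_restore(), as
the revert does throughout the diff below, closes that window at the
cost of the irq-toggle overhead the original patch tried to avoid.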
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	43
1 file changed, 20 insertions(+), 23 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f3d603cef2c0..07efbc3a8656 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1090,10 +1090,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
 	int migratetype = 0;
 	int batch_free = 0;
-	unsigned long nr_scanned, flags;
+	unsigned long nr_scanned;
 	bool isolated_pageblocks;
 
-	spin_lock_irqsave(&zone->lock, flags);
+	spin_lock(&zone->lock);
 	isolated_pageblocks = has_isolate_pageblock(zone);
 	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
 	if (nr_scanned)
@@ -1142,7 +1142,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			trace_mm_page_pcpu_drain(page, 0, mt);
 		} while (--count && --batch_free && !list_empty(list));
 	}
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 }
 
 static void free_one_page(struct zone *zone,
@@ -1150,9 +1150,8 @@ static void free_one_page(struct zone *zone,
 				unsigned int order,
 				int migratetype)
 {
-	unsigned long nr_scanned, flags;
-	spin_lock_irqsave(&zone->lock, flags);
-	__count_vm_events(PGFREE, 1 << order);
+	unsigned long nr_scanned;
+	spin_lock(&zone->lock);
 	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
 	if (nr_scanned)
 		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
@@ -1162,7 +1161,7 @@ static void free_one_page(struct zone *zone,
 		migratetype = get_pfnblock_migratetype(page, pfn);
 	}
 	__free_one_page(page, pfn, zone, order, migratetype);
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 }
 
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
@@ -1240,6 +1239,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
 
 static void __free_pages_ok(struct page *page, unsigned int order)
 {
+	unsigned long flags;
 	int migratetype;
 	unsigned long pfn = page_to_pfn(page);
 
@@ -1247,7 +1247,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
+	local_irq_save(flags);
+	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, pfn, order, migratetype);
+	local_irq_restore(flags);
 }
 
 static void __init __free_pages_boot_core(struct page *page, unsigned int order)
@@ -2219,9 +2222,8 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			int migratetype, bool cold)
 {
 	int i, alloced = 0;
-	unsigned long flags;
 
-	spin_lock_irqsave(&zone->lock, flags);
+	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
 		struct page *page = __rmqueue(zone, order, migratetype);
 		if (unlikely(page == NULL))
@@ -2257,7 +2259,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 	 * pages added to the pcp list.
 	 */
 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 	return alloced;
 }
 
@@ -2485,20 +2487,17 @@ void free_hot_cold_page(struct page *page, bool cold)
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
+	unsigned long flags;
 	unsigned long pfn = page_to_pfn(page);
 	int migratetype;
 
-	if (in_interrupt()) {
-		__free_pages_ok(page, 0);
-		return;
-	}
-
 	if (!free_pcp_prepare(page))
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
 	set_pcppage_migratetype(page, migratetype);
-	preempt_disable();
+	local_irq_save(flags);
+	__count_vm_event(PGFREE);
 
 	/*
 	 * We only track unmovable, reclaimable and movable on pcp lists.
@@ -2515,7 +2514,6 @@ void free_hot_cold_page(struct page *page, bool cold)
 		migratetype = MIGRATE_MOVABLE;
 	}
 
-	__count_vm_event(PGFREE);
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
 	if (!cold)
 		list_add(&page->lru, &pcp->lists[migratetype]);
@@ -2529,7 +2527,7 @@ void free_hot_cold_page(struct page *page, bool cold)
 	}
 
 out:
-	preempt_enable();
+	local_irq_restore(flags);
 }
 
 /*
@@ -2654,8 +2652,6 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
 {
 	struct page *page;
 
-	VM_BUG_ON(in_interrupt());
-
 	do {
 		if (list_empty(list)) {
 			pcp->count += rmqueue_bulk(zone, 0,
@@ -2686,8 +2682,9 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 	struct list_head *list;
 	bool cold = ((gfp_flags & __GFP_COLD) != 0);
 	struct page *page;
+	unsigned long flags;
 
-	preempt_disable();
+	local_irq_save(flags);
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
 	list = &pcp->lists[migratetype];
 	page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
@@ -2695,7 +2692,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 		zone_statistics(preferred_zone, zone);
 	}
-	preempt_enable();
+	local_irq_restore(flags);
 	return page;
 }
 
@@ -2711,7 +2708,7 @@ struct page *rmqueue(struct zone *preferred_zone,
 	unsigned long flags;
 	struct page *page;
 
-	if (likely(order == 0) && !in_interrupt()) {
+	if (likely(order == 0)) {
 		page = rmqueue_pcplist(preferred_zone, zone, order,
 				gfp_flags, migratetype);
 		goto out;