Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  |  9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4e9f5cc5fb59..8deb9d0fd5b1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -556,8 +556,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			page = list_entry(list->prev, struct page, lru);
 			/* must delete as __free_one_page list manipulates */
 			list_del(&page->lru);
-			__free_one_page(page, zone, 0, migratetype);
-			trace_mm_page_pcpu_drain(page, 0, migratetype);
+			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+			__free_one_page(page, zone, 0, page_private(page));
+			trace_mm_page_pcpu_drain(page, 0, page_private(page));
 		} while (--count && --batch_free && !list_empty(list));
 	}
 	spin_unlock(&zone->lock);
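Note on the hunk above: when a page is queued on a per-cpu list its real migratetype is recorded in page_private(), and, as the added comment says, the MIGRATE_MOVABLE list may also be holding MIGRATE_RESERVE pages; the drain therefore now frees each page to the buddy list named by page_private(page) rather than by the list it was taken off. A minimal user-space sketch of that idea, with invented names (toy_page, toy_drain, TYPE_*) standing in for the kernel structures:

/*
 * Toy model only (not kernel code): pages drained from a per-type list
 * are returned to the free area named by the type recorded on the page
 * itself, because a list indexed as "movable" can also carry "reserve"
 * pages.  All names here (toy_page, toy_drain, TYPE_*) are invented.
 */
#include <stdio.h>

enum { TYPE_MOVABLE, TYPE_RESERVE, NR_TYPES };

struct toy_page {
	int private;			/* true type, set when the page was queued */
	struct toy_page *next;
};

static int freed[NR_TYPES];		/* pages returned to each buddy list */

static void toy_drain(struct toy_page *list)
{
	for (struct toy_page *p = list; p; p = p->next)
		freed[p->private]++;	/* use the page's own type, not the list index */
}

int main(void)
{
	/* A "movable" per-cpu list that happens to contain a reserve page. */
	struct toy_page reserve = { TYPE_RESERVE, NULL };
	struct toy_page movable = { TYPE_MOVABLE, &reserve };

	toy_drain(&movable);
	printf("freed movable=%d reserve=%d\n", freed[TYPE_MOVABLE], freed[TYPE_RESERVE]);
	return 0;
}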
@@ -1222,10 +1223,10 @@ again:
 		}
 		spin_lock_irqsave(&zone->lock, flags);
 		page = __rmqueue(zone, order, migratetype);
-		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 		spin_unlock(&zone->lock);
 		if (!page)
 			goto failed;
+		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 	}
 
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
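The hunk above moves the NR_FREE_PAGES adjustment below the NULL check, so the zone's free-page counter is charged only once __rmqueue() has actually returned a page; a failed attempt now leaves the counter untouched. A stand-alone sketch of the same ordering, using invented names (toy_rmqueue, toy_alloc, nr_free_stat) rather than the kernel APIs:

/*
 * Toy model only: charge the free-page statistic after the allocation is
 * known to have succeeded, so a failed attempt leaves it alone.
 * toy_rmqueue(), toy_alloc() and nr_free_stat are invented names.
 */
#include <stdio.h>
#include <stdlib.h>

static long nr_free_stat = 4;		/* statistics counter, not the allocator itself */

static void *toy_rmqueue(int order)
{
	return order > 1 ? NULL : malloc(1);	/* pretend high-order requests fail */
}

static void *toy_alloc(int order)
{
	void *page = toy_rmqueue(order);

	if (!page)
		return NULL;			/* failure: statistic left alone */
	nr_free_stat -= 1L << order;		/* charge only on success */
	return page;
}

int main(void)
{
	void *a = toy_alloc(3);	/* fails, nr_free_stat stays 4 */
	void *b = toy_alloc(0);	/* succeeds, nr_free_stat drops to 3 */

	printf("a=%p b=%p nr_free_stat=%ld\n", a, b, nr_free_stat);
	free(b);
	return 0;
}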
@@ -3998,7 +3999,7 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 		}
 
 		/* Merge backward if suitable */
-		if (start_pfn < early_node_map[i].end_pfn &&
+		if (start_pfn < early_node_map[i].start_pfn &&
 		    end_pfn >= early_node_map[i].start_pfn) {
 			early_node_map[i].start_pfn = start_pfn;
 			return;
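The last hunk tightens the backward-merge test in add_active_range(): early_node_map[i].start_pfn is now only overwritten when the new range begins before the existing region (start_pfn compared against the entry's start_pfn, not its end_pfn), i.e. when it genuinely extends the region backward. A small sketch of the corrected condition over [start, end) pfn ranges, with invented names (struct range, merge_backward):

/*
 * Toy model only: the backward-merge test over [start, end) pfn ranges.
 * merge_backward() mirrors the corrected condition; struct range and the
 * function name are invented for illustration.
 */
#include <stdio.h>
#include <stdbool.h>

struct range { unsigned long start, end; };

static bool merge_backward(struct range *r, unsigned long start, unsigned long end)
{
	/* Extend the range only if the new one really begins before it. */
	if (start < r->start && end >= r->start) {
		r->start = start;
		return true;
	}
	return false;
}

int main(void)
{
	struct range r = { 100, 200 };
	bool merged;

	/* A range starting inside the existing one must not move its start. */
	merged = merge_backward(&r, 150, 180);
	printf("inner:  merged=%d start=%lu\n", merged, r.start);

	/* A range beginning earlier extends the existing one backward. */
	merged = merge_backward(&r, 50, 120);
	printf("before: merged=%d start=%lu\n", merged, r.start);
	return 0;
}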