about summary refs log tree commit diff stats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  57
1 file changed, 31 insertions(+), 26 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index eaa64d2ffdc5..07efbc3a8656 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -873,7 +873,8 @@ done_merging:
873 higher_page = page + (combined_pfn - pfn); 873 higher_page = page + (combined_pfn - pfn);
874 buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1); 874 buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
875 higher_buddy = higher_page + (buddy_pfn - combined_pfn); 875 higher_buddy = higher_page + (buddy_pfn - combined_pfn);
876 if (page_is_buddy(higher_page, higher_buddy, order + 1)) { 876 if (pfn_valid_within(buddy_pfn) &&
877 page_is_buddy(higher_page, higher_buddy, order + 1)) {
877 list_add_tail(&page->lru, 878 list_add_tail(&page->lru,
878 &zone->free_area[order].free_list[migratetype]); 879 &zone->free_area[order].free_list[migratetype]);
879 goto out; 880 goto out;
@@ -1089,10 +1090,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
1089{ 1090{
1090 int migratetype = 0; 1091 int migratetype = 0;
1091 int batch_free = 0; 1092 int batch_free = 0;
1092 unsigned long nr_scanned, flags; 1093 unsigned long nr_scanned;
1093 bool isolated_pageblocks; 1094 bool isolated_pageblocks;
1094 1095
1095 spin_lock_irqsave(&zone->lock, flags); 1096 spin_lock(&zone->lock);
1096 isolated_pageblocks = has_isolate_pageblock(zone); 1097 isolated_pageblocks = has_isolate_pageblock(zone);
1097 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED); 1098 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
1098 if (nr_scanned) 1099 if (nr_scanned)
@@ -1141,7 +1142,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
1141 trace_mm_page_pcpu_drain(page, 0, mt); 1142 trace_mm_page_pcpu_drain(page, 0, mt);
1142 } while (--count && --batch_free && !list_empty(list)); 1143 } while (--count && --batch_free && !list_empty(list));
1143 } 1144 }
1144 spin_unlock_irqrestore(&zone->lock, flags); 1145 spin_unlock(&zone->lock);
1145} 1146}
1146 1147
1147static void free_one_page(struct zone *zone, 1148static void free_one_page(struct zone *zone,
@@ -1149,9 +1150,8 @@ static void free_one_page(struct zone *zone,
1149 unsigned int order, 1150 unsigned int order,
1150 int migratetype) 1151 int migratetype)
1151{ 1152{
1152 unsigned long nr_scanned, flags; 1153 unsigned long nr_scanned;
1153 spin_lock_irqsave(&zone->lock, flags); 1154 spin_lock(&zone->lock);
1154 __count_vm_events(PGFREE, 1 << order);
1155 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED); 1155 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
1156 if (nr_scanned) 1156 if (nr_scanned)
1157 __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned); 1157 __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
@@ -1161,7 +1161,7 @@ static void free_one_page(struct zone *zone,
1161 migratetype = get_pfnblock_migratetype(page, pfn); 1161 migratetype = get_pfnblock_migratetype(page, pfn);
1162 } 1162 }
1163 __free_one_page(page, pfn, zone, order, migratetype); 1163 __free_one_page(page, pfn, zone, order, migratetype);
1164 spin_unlock_irqrestore(&zone->lock, flags); 1164 spin_unlock(&zone->lock);
1165} 1165}
1166 1166
1167static void __meminit __init_single_page(struct page *page, unsigned long pfn, 1167static void __meminit __init_single_page(struct page *page, unsigned long pfn,
@@ -1239,6 +1239,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1239 1239
1240static void __free_pages_ok(struct page *page, unsigned int order) 1240static void __free_pages_ok(struct page *page, unsigned int order)
1241{ 1241{
1242 unsigned long flags;
1242 int migratetype; 1243 int migratetype;
1243 unsigned long pfn = page_to_pfn(page); 1244 unsigned long pfn = page_to_pfn(page);
1244 1245
@@ -1246,7 +1247,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
1246 return; 1247 return;
1247 1248
1248 migratetype = get_pfnblock_migratetype(page, pfn); 1249 migratetype = get_pfnblock_migratetype(page, pfn);
1250 local_irq_save(flags);
1251 __count_vm_events(PGFREE, 1 << order);
1249 free_one_page(page_zone(page), page, pfn, order, migratetype); 1252 free_one_page(page_zone(page), page, pfn, order, migratetype);
1253 local_irq_restore(flags);
1250} 1254}
1251 1255
1252static void __init __free_pages_boot_core(struct page *page, unsigned int order) 1256static void __init __free_pages_boot_core(struct page *page, unsigned int order)
@@ -2218,9 +2222,8 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
2218 int migratetype, bool cold) 2222 int migratetype, bool cold)
2219{ 2223{
2220 int i, alloced = 0; 2224 int i, alloced = 0;
2221 unsigned long flags;
2222 2225
2223 spin_lock_irqsave(&zone->lock, flags); 2226 spin_lock(&zone->lock);
2224 for (i = 0; i < count; ++i) { 2227 for (i = 0; i < count; ++i) {
2225 struct page *page = __rmqueue(zone, order, migratetype); 2228 struct page *page = __rmqueue(zone, order, migratetype);
2226 if (unlikely(page == NULL)) 2229 if (unlikely(page == NULL))
@@ -2256,7 +2259,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
2256 * pages added to the pcp list. 2259 * pages added to the pcp list.
2257 */ 2260 */
2258 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 2261 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2259 spin_unlock_irqrestore(&zone->lock, flags); 2262 spin_unlock(&zone->lock);
2260 return alloced; 2263 return alloced;
2261} 2264}
2262 2265
@@ -2372,6 +2375,13 @@ void drain_all_pages(struct zone *zone)
2372 */ 2375 */
2373 static cpumask_t cpus_with_pcps; 2376 static cpumask_t cpus_with_pcps;
2374 2377
2378 /*
2379 * Make sure nobody triggers this path before mm_percpu_wq is fully
2380 * initialized.
2381 */
2382 if (WARN_ON_ONCE(!mm_percpu_wq))
2383 return;
2384
2375 /* Workqueues cannot recurse */ 2385 /* Workqueues cannot recurse */
2376 if (current->flags & PF_WQ_WORKER) 2386 if (current->flags & PF_WQ_WORKER)
2377 return; 2387 return;
@@ -2421,7 +2431,7 @@ void drain_all_pages(struct zone *zone)
2421 for_each_cpu(cpu, &cpus_with_pcps) { 2431 for_each_cpu(cpu, &cpus_with_pcps) {
2422 struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu); 2432 struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
2423 INIT_WORK(work, drain_local_pages_wq); 2433 INIT_WORK(work, drain_local_pages_wq);
2424 schedule_work_on(cpu, work); 2434 queue_work_on(cpu, mm_percpu_wq, work);
2425 } 2435 }
2426 for_each_cpu(cpu, &cpus_with_pcps) 2436 for_each_cpu(cpu, &cpus_with_pcps)
2427 flush_work(per_cpu_ptr(&pcpu_drain, cpu)); 2437 flush_work(per_cpu_ptr(&pcpu_drain, cpu));
@@ -2477,20 +2487,17 @@ void free_hot_cold_page(struct page *page, bool cold)
2477{ 2487{
2478 struct zone *zone = page_zone(page); 2488 struct zone *zone = page_zone(page);
2479 struct per_cpu_pages *pcp; 2489 struct per_cpu_pages *pcp;
2490 unsigned long flags;
2480 unsigned long pfn = page_to_pfn(page); 2491 unsigned long pfn = page_to_pfn(page);
2481 int migratetype; 2492 int migratetype;
2482 2493
2483 if (in_interrupt()) {
2484 __free_pages_ok(page, 0);
2485 return;
2486 }
2487
2488 if (!free_pcp_prepare(page)) 2494 if (!free_pcp_prepare(page))
2489 return; 2495 return;
2490 2496
2491 migratetype = get_pfnblock_migratetype(page, pfn); 2497 migratetype = get_pfnblock_migratetype(page, pfn);
2492 set_pcppage_migratetype(page, migratetype); 2498 set_pcppage_migratetype(page, migratetype);
2493 preempt_disable(); 2499 local_irq_save(flags);
2500 __count_vm_event(PGFREE);
2494 2501
2495 /* 2502 /*
2496 * We only track unmovable, reclaimable and movable on pcp lists. 2503 * We only track unmovable, reclaimable and movable on pcp lists.
@@ -2507,7 +2514,6 @@ void free_hot_cold_page(struct page *page, bool cold)
2507 migratetype = MIGRATE_MOVABLE; 2514 migratetype = MIGRATE_MOVABLE;
2508 } 2515 }
2509 2516
2510 __count_vm_event(PGFREE);
2511 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2517 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2512 if (!cold) 2518 if (!cold)
2513 list_add(&page->lru, &pcp->lists[migratetype]); 2519 list_add(&page->lru, &pcp->lists[migratetype]);
@@ -2521,7 +2527,7 @@ void free_hot_cold_page(struct page *page, bool cold)
2521 } 2527 }
2522 2528
2523out: 2529out:
2524 preempt_enable(); 2530 local_irq_restore(flags);
2525} 2531}
2526 2532
2527/* 2533/*
@@ -2646,8 +2652,6 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
2646{ 2652{
2647 struct page *page; 2653 struct page *page;
2648 2654
2649 VM_BUG_ON(in_interrupt());
2650
2651 do { 2655 do {
2652 if (list_empty(list)) { 2656 if (list_empty(list)) {
2653 pcp->count += rmqueue_bulk(zone, 0, 2657 pcp->count += rmqueue_bulk(zone, 0,
@@ -2678,8 +2682,9 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2678 struct list_head *list; 2682 struct list_head *list;
2679 bool cold = ((gfp_flags & __GFP_COLD) != 0); 2683 bool cold = ((gfp_flags & __GFP_COLD) != 0);
2680 struct page *page; 2684 struct page *page;
2685 unsigned long flags;
2681 2686
2682 preempt_disable(); 2687 local_irq_save(flags);
2683 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2688 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2684 list = &pcp->lists[migratetype]; 2689 list = &pcp->lists[migratetype];
2685 page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list); 2690 page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
@@ -2687,7 +2692,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2687 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2692 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2688 zone_statistics(preferred_zone, zone); 2693 zone_statistics(preferred_zone, zone);
2689 } 2694 }
2690 preempt_enable(); 2695 local_irq_restore(flags);
2691 return page; 2696 return page;
2692} 2697}
2693 2698
@@ -2703,7 +2708,7 @@ struct page *rmqueue(struct zone *preferred_zone,
2703 unsigned long flags; 2708 unsigned long flags;
2704 struct page *page; 2709 struct page *page;
2705 2710
2706 if (likely(order == 0) && !in_interrupt()) { 2711 if (likely(order == 0)) {
2707 page = rmqueue_pcplist(preferred_zone, zone, order, 2712 page = rmqueue_pcplist(preferred_zone, zone, order,
2708 gfp_flags, migratetype); 2713 gfp_flags, migratetype);
2709 goto out; 2714 goto out;
@@ -4518,13 +4523,13 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
4518 K(node_page_state(pgdat, NR_FILE_MAPPED)), 4523 K(node_page_state(pgdat, NR_FILE_MAPPED)),
4519 K(node_page_state(pgdat, NR_FILE_DIRTY)), 4524 K(node_page_state(pgdat, NR_FILE_DIRTY)),
4520 K(node_page_state(pgdat, NR_WRITEBACK)), 4525 K(node_page_state(pgdat, NR_WRITEBACK)),
4526 K(node_page_state(pgdat, NR_SHMEM)),
4521#ifdef CONFIG_TRANSPARENT_HUGEPAGE 4527#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4522 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR), 4528 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
4523 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) 4529 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
4524 * HPAGE_PMD_NR), 4530 * HPAGE_PMD_NR),
4525 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR), 4531 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
4526#endif 4532#endif
4527 K(node_page_state(pgdat, NR_SHMEM)),
4528 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), 4533 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
4529 K(node_page_state(pgdat, NR_UNSTABLE_NFS)), 4534 K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
4530 node_page_state(pgdat, NR_PAGES_SCANNED), 4535 node_page_state(pgdat, NR_PAGES_SCANNED),