Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	59
1 file changed, 54 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 83637dfba110..4ba5e37127fc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -371,8 +371,7 @@ static int destroy_compound_page(struct page *page, unsigned long order)
 	int nr_pages = 1 << order;
 	int bad = 0;
 
-	if (unlikely(compound_order(page) != order) ||
-	    unlikely(!PageHead(page))) {
+	if (unlikely(compound_order(page) != order)) {
 		bad_page(page);
 		bad++;
 	}
@@ -611,6 +610,7 @@ static inline int free_pages_check(struct page *page)
 		bad_page(page);
 		return 1;
 	}
+	reset_page_last_nid(page);
 	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
 		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 	return 0;
@@ -2612,6 +2612,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	int migratetype = allocflags_to_migratetype(gfp_mask);
 	unsigned int cpuset_mems_cookie;
 	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
+	struct mem_cgroup *memcg = NULL;
 
 	gfp_mask &= gfp_allowed_mask;
 
@@ -2630,6 +2631,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	if (unlikely(!zonelist->_zonerefs->zone))
 		return NULL;
 
+	/*
+	 * Will only have any effect when __GFP_KMEMCG is set. This is
+	 * verified in the (always inline) callee
+	 */
+	if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
+		return NULL;
+
 retry_cpuset:
 	cpuset_mems_cookie = get_mems_allowed();
 
@@ -2665,6 +2673,8 @@ out:
 	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
 		goto retry_cpuset;
 
+	memcg_kmem_commit_charge(page, memcg, order);
+
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
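The two hunks above pair a charge taken before the page is allocated with a commit once the allocation attempt has finished. A minimal sketch of that protocol follows; only memcg_kmem_newpage_charge(), memcg_kmem_commit_charge() and __GFP_KMEMCG come from the patch itself, alloc_from_zonelist() is a placeholder, and the failure handling in the commit step is an assumption:

/*
 * Illustrative sketch, not the patch itself: the shape of the kmemcg
 * charge/commit pairing added to __alloc_pages_nodemask().
 */
static struct page *sketch_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct mem_cgroup *memcg = NULL;
	struct page *page;

	/* No-op unless __GFP_KMEMCG is set; may refuse the allocation outright. */
	if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
		return NULL;

	page = alloc_from_zonelist(gfp_mask, order);	/* placeholder for the real path */

	/* Bind the charged memcg to the page; assumed to undo the charge if !page. */
	memcg_kmem_commit_charge(page, memcg, order);
	return page;
}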
@@ -2717,6 +2727,31 @@ void free_pages(unsigned long addr, unsigned int order)
 
 EXPORT_SYMBOL(free_pages);
 
+/*
+ * __free_memcg_kmem_pages and free_memcg_kmem_pages will free
+ * pages allocated with __GFP_KMEMCG.
+ *
+ * Those pages are accounted to a particular memcg, embedded in the
+ * corresponding page_cgroup. To avoid adding a hit in the allocator to search
+ * for that information only to find out that it is NULL for users who have no
+ * interest in that whatsoever, we provide these functions.
+ *
+ * The caller knows better which flags it relies on.
+ */
+void __free_memcg_kmem_pages(struct page *page, unsigned int order)
+{
+	memcg_kmem_uncharge_pages(page, order);
+	__free_pages(page, order);
+}
+
+void free_memcg_kmem_pages(unsigned long addr, unsigned int order)
+{
+	if (addr != 0) {
+		VM_BUG_ON(!virt_addr_valid((void *)addr));
+		__free_memcg_kmem_pages(virt_to_page((void *)addr), order);
+	}
+}
+
 static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
 {
 	if (addr) {
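For callers, the intended pairing looks roughly like the sketch below; the kmemcg_buf_* names are invented for illustration, while __GFP_KMEMCG, __get_free_pages() and the new free helper are real:

#include <linux/gfp.h>

/*
 * Illustration only: memory allocated with __GFP_KMEMCG is charged to the
 * current task's memcg, so it must be released through the matching helper
 * for the charge recorded in the page_cgroup to be dropped again.
 */
static unsigned long kmemcg_buf_alloc(unsigned int order)
{
	return __get_free_pages(GFP_KERNEL | __GFP_KMEMCG, order);
}

static void kmemcg_buf_free(unsigned long addr, unsigned int order)
{
	free_memcg_kmem_pages(addr, order);	/* uncharge, then free the pages */
}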
@@ -3883,6 +3918,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		mminit_verify_page_links(page, zone, nid, pfn);
 		init_page_count(page);
 		reset_page_mapcount(page);
+		reset_page_last_nid(page);
 		SetPageReserved(page);
 		/*
 		 * Mark the block movable so that blocks are reserved for
@@ -4526,6 +4562,11 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 	int ret;
 
 	pgdat_resize_init(pgdat);
+#ifdef CONFIG_NUMA_BALANCING
+	spin_lock_init(&pgdat->numabalancing_migrate_lock);
+	pgdat->numabalancing_migrate_nr_pages = 0;
+	pgdat->numabalancing_migrate_next_window = jiffies;
+#endif
 	init_waitqueue_head(&pgdat->kswapd_wait);
 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
 	pgdat_page_cgroup_init(pgdat);
@@ -5800,7 +5841,8 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 
 		ret = migrate_pages(&cc->migratepages,
 				    alloc_migrate_target,
-				    0, false, MIGRATE_SYNC);
+				    0, false, MIGRATE_SYNC,
+				    MR_CMA);
 	}
 
 	putback_movable_pages(&cc->migratepages);
@@ -5936,8 +5978,15 @@ done:
 
 void free_contig_range(unsigned long pfn, unsigned nr_pages)
 {
-	for (; nr_pages--; ++pfn)
-		__free_page(pfn_to_page(pfn));
+	unsigned int count = 0;
+
+	for (; nr_pages--; pfn++) {
+		struct page *page = pfn_to_page(pfn);
+
+		count += page_count(page) != 1;
+		__free_page(page);
+	}
+	WARN(count != 0, "%d pages are still in use!\n", count);
 }
 #endif
 