Diffstat (limited to 'mm')

 mm/backing-dev.c |  23
 mm/huge_memory.c |   6
 mm/hugetlb.c     |   2
 mm/ksm.c         |  11
 mm/memblock.c    |   6
 mm/memcontrol.c  | 109
 mm/mempolicy.c   |   3
 mm/migrate.c     |   2
 mm/mlock.c       |   3
 mm/mmap.c        |  17
 mm/mprotect.c    |   3
 mm/nommu.c       |   9
 mm/page_alloc.c  |   1
 mm/page_cgroup.c |   4
 mm/percpu-vm.c   |   3
 mm/swap.c        |   8
 mm/swap_state.c  |  10
 17 files changed, 117 insertions(+), 103 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 7ba8feae11b8..dd8e2aafb07e 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -318,7 +318,7 @@ static void wakeup_timer_fn(unsigned long data)
 	if (bdi->wb.task) {
 		trace_writeback_wake_thread(bdi);
 		wake_up_process(bdi->wb.task);
-	} else {
+	} else if (bdi->dev) {
 		/*
 		 * When bdi tasks are inactive for long time, they are killed.
 		 * In this case we have to wake-up the forker thread which
@@ -584,6 +584,8 @@ EXPORT_SYMBOL(bdi_register_dev);
  */
 static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 {
+	struct task_struct *task;
+
 	if (!bdi_cap_writeback_dirty(bdi))
 		return;
 
@@ -602,8 +604,13 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 	 * Finally, kill the kernel thread. We don't need to be RCU
 	 * safe anymore, since the bdi is gone from visibility.
 	 */
-	if (bdi->wb.task)
-		kthread_stop(bdi->wb.task);
+	spin_lock_bh(&bdi->wb_lock);
+	task = bdi->wb.task;
+	bdi->wb.task = NULL;
+	spin_unlock_bh(&bdi->wb_lock);
+
+	if (task)
+		kthread_stop(task);
 }
 
 /*
@@ -623,7 +630,9 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
 
 void bdi_unregister(struct backing_dev_info *bdi)
 {
-	if (bdi->dev) {
+	struct device *dev = bdi->dev;
+
+	if (dev) {
 		bdi_set_min_ratio(bdi, 0);
 		trace_writeback_bdi_unregister(bdi);
 		bdi_prune_sb(bdi);
@@ -632,8 +641,12 @@ void bdi_unregister(struct backing_dev_info *bdi)
 		if (!bdi_cap_flush_forker(bdi))
 			bdi_wb_shutdown(bdi);
 		bdi_debug_unregister(bdi);
-		device_unregister(bdi->dev);
+
+		spin_lock_bh(&bdi->wb_lock);
 		bdi->dev = NULL;
+		spin_unlock_bh(&bdi->wb_lock);
+
+		device_unregister(dev);
 	}
 }
 EXPORT_SYMBOL(bdi_unregister);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 91d3efb25d15..8f7fc394f636 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -671,6 +671,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 		set_pmd_at(mm, haddr, pmd, entry);
 		prepare_pmd_huge_pte(pgtable, mm);
 		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+		mm->nr_ptes++;
 		spin_unlock(&mm->page_table_lock);
 	}
 
@@ -789,6 +790,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	pmd = pmd_mkold(pmd_wrprotect(pmd));
 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
 	prepare_pmd_huge_pte(pgtable, dst_mm);
+	dst_mm->nr_ptes++;
 
 	ret = 0;
 out_unlock:
@@ -887,7 +889,6 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 	}
 	kfree(pages);
 
-	mm->nr_ptes++;
 	smp_wmb(); /* make pte visible before pmd */
 	pmd_populate(mm, pmd, pgtable);
 	page_remove_rmap(page);
@@ -1047,6 +1048,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		VM_BUG_ON(page_mapcount(page) < 0);
 		add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
 		VM_BUG_ON(!PageHead(page));
+		tlb->mm->nr_ptes--;
 		spin_unlock(&tlb->mm->page_table_lock);
 		tlb_remove_page(tlb, page);
 		pte_free(tlb->mm, pgtable);
@@ -1375,7 +1377,6 @@ static int __split_huge_page_map(struct page *page,
 		pte_unmap(pte);
 	}
 
-	mm->nr_ptes++;
 	smp_wmb(); /* make pte visible before pmd */
 	/*
 	 * Up to this point the pmd is present and huge and
@@ -1988,7 +1989,6 @@ static void collapse_huge_page(struct mm_struct *mm,
 	set_pmd_at(mm, address, pmd, _pmd);
 	update_mmu_cache(vma, address, _pmd);
 	prepare_pmd_huge_pte(pgtable, mm);
-	mm->nr_ptes--;
 	spin_unlock(&mm->page_table_lock);
 
 #ifndef CONFIG_NUMA
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5f34bd8dda34..a876871f6be5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2277,8 +2277,8 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 		set_page_dirty(page);
 		list_add(&page->lru, &page_list);
 	}
-	spin_unlock(&mm->page_table_lock);
 	flush_tlb_range(vma, start, end);
+	spin_unlock(&mm->page_table_lock);
 	mmu_notifier_invalidate_range_end(mm, start, end);
 	list_for_each_entry_safe(page, tmp, &page_list, lru) {
 		page_remove_rmap(page);
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -28,7 +28,6 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
-#include <linux/memcontrol.h>
 #include <linux/rbtree.h>
 #include <linux/memory.h>
 #include <linux/mmu_notifier.h>
@@ -1572,16 +1571,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 
 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 	if (new_page) {
-		/*
-		 * The memcg-specific accounting when moving
-		 * pages around the LRU lists relies on the
-		 * page's owner (memcg) to be valid. Usually,
-		 * pages are assigned to a new owner before
-		 * being put on the LRU list, but since this
-		 * is not the case here, the stale owner from
-		 * a previous allocation cycle must be reset.
-		 */
-		mem_cgroup_reset_owner(new_page);
 		copy_user_highpage(new_page, page, address, vma);
 
 		SetPageDirty(new_page);
diff --git a/mm/memblock.c b/mm/memblock.c
index 77b5f227e1d8..99f285599501 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -99,9 +99,6 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
 	phys_addr_t this_start, this_end, cand;
 	u64 i;
 
-	/* align @size to avoid excessive fragmentation on reserved array */
-	size = round_up(size, align);
-
 	/* pump up @end */
 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
 		end = memblock.current_limit;
@@ -731,6 +728,9 @@ static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
 {
 	phys_addr_t found;
 
+	/* align @size to avoid excessive fragmentation on reserved array */
+	size = round_up(size, align);
+
 	found = memblock_find_in_range_node(0, max_addr, size, align, nid);
 	if (found && !memblock_reserve(found, size))
 		return found;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6728a7ae6f2d..5585dc3d3646 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1042,6 +1042,19 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 
 	pc = lookup_page_cgroup(page);
 	memcg = pc->mem_cgroup;
+
+	/*
+	 * Surreptitiously switch any uncharged page to root:
+	 * an uncharged page off lru does nothing to secure
+	 * its former mem_cgroup from sudden removal.
+	 *
+	 * Our caller holds lru_lock, and PageCgroupUsed is updated
+	 * under page_cgroup lock: between them, they make all uses
+	 * of pc->mem_cgroup safe.
+	 */
+	if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+		pc->mem_cgroup = memcg = root_mem_cgroup;
+
 	mz = page_cgroup_zoneinfo(memcg, page);
 	/* compound_order() is stabilized through lru_lock */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
@@ -2408,8 +2421,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 				       struct page *page,
 				       unsigned int nr_pages,
 				       struct page_cgroup *pc,
-				       enum charge_type ctype)
+				       enum charge_type ctype,
+				       bool lrucare)
 {
+	struct zone *uninitialized_var(zone);
+	bool was_on_lru = false;
+
 	lock_page_cgroup(pc);
 	if (unlikely(PageCgroupUsed(pc))) {
 		unlock_page_cgroup(pc);
@@ -2420,6 +2437,21 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	 * we don't need page_cgroup_lock about tail pages, becase they are not
 	 * accessed by any other context at this point.
 	 */
+
+	/*
+	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
+	 * may already be on some other mem_cgroup's LRU. Take care of it.
+	 */
+	if (lrucare) {
+		zone = page_zone(page);
+		spin_lock_irq(&zone->lru_lock);
+		if (PageLRU(page)) {
+			ClearPageLRU(page);
+			del_page_from_lru_list(zone, page, page_lru(page));
+			was_on_lru = true;
+		}
+	}
+
 	pc->mem_cgroup = memcg;
 	/*
 	 * We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2443,9 +2475,18 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 		break;
 	}
 
+	if (lrucare) {
+		if (was_on_lru) {
+			VM_BUG_ON(PageLRU(page));
+			SetPageLRU(page);
+			add_page_to_lru_list(zone, page, page_lru(page));
+		}
+		spin_unlock_irq(&zone->lru_lock);
+	}
+
 	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
 	unlock_page_cgroup(pc);
-	WARN_ON_ONCE(PageLRU(page));
+
 	/*
 	 * "charge_statistics" updated event counter. Then, check it.
 	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
@@ -2643,7 +2684,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 	if (ret == -ENOMEM)
 		return ret;
-	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
+	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
 	return 0;
 }
 
@@ -2663,35 +2704,6 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
 				  enum charge_type ctype);
 
-static void
-__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
-					enum charge_type ctype)
-{
-	struct page_cgroup *pc = lookup_page_cgroup(page);
-	struct zone *zone = page_zone(page);
-	unsigned long flags;
-	bool removed = false;
-
-	/*
-	 * In some case, SwapCache, FUSE(splice_buf->radixtree), the page
-	 * is already on LRU. It means the page may on some other page_cgroup's
-	 * LRU. Take care of it.
-	 */
-	spin_lock_irqsave(&zone->lru_lock, flags);
-	if (PageLRU(page)) {
-		del_page_from_lru_list(zone, page, page_lru(page));
-		ClearPageLRU(page);
-		removed = true;
-	}
-	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
-	if (removed) {
-		add_page_to_lru_list(zone, page, page_lru(page));
-		SetPageLRU(page);
-	}
-	spin_unlock_irqrestore(&zone->lru_lock, flags);
-	return;
-}
-
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask)
 {
@@ -2769,13 +2781,16 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
 				  enum charge_type ctype)
 {
+	struct page_cgroup *pc;
+
 	if (mem_cgroup_disabled())
 		return;
 	if (!memcg)
 		return;
 	cgroup_exclude_rmdir(&memcg->css);
 
-	__mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
+	pc = lookup_page_cgroup(page);
+	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
 	/*
 	 * Now swap is on-memory. This means this page may be
 	 * counted both as mem and swap....double count.
@@ -3027,23 +3042,6 @@ void mem_cgroup_uncharge_end(void)
 	batch->memcg = NULL;
 }
 
-/*
- * A function for resetting pc->mem_cgroup for newly allocated pages.
- * This function should be called if the newpage will be added to LRU
- * before start accounting.
- */
-void mem_cgroup_reset_owner(struct page *newpage)
-{
-	struct page_cgroup *pc;
-
-	if (mem_cgroup_disabled())
-		return;
-
-	pc = lookup_page_cgroup(newpage);
-	VM_BUG_ON(PageCgroupUsed(pc));
-	pc->mem_cgroup = root_mem_cgroup;
-}
-
 #ifdef CONFIG_SWAP
 /*
  * called after __delete_from_swap_cache() and drop "page" account.
@@ -3248,7 +3246,7 @@ int mem_cgroup_prepare_migration(struct page *page,
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
 	return ret;
 }
 
@@ -3332,7 +3330,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
 	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
 	 * LRU while we overwrite pc->mem_cgroup.
 	 */
-	__mem_cgroup_commit_charge_lrucare(newpage, memcg, type);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -4414,6 +4412,9 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 	 */
 	BUG_ON(!thresholds);
 
+	if (!thresholds->primary)
+		goto unlock;
+
 	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
 
 	/* Check if a threshold crossed before removing */
@@ -4462,7 +4463,7 @@ swap_buffers:
 
 	/* To be sure that nobody uses thresholds */
 	synchronize_rcu();
-
+unlock:
 	mutex_unlock(&memcg->thresholds_lock);
 }
 
@@ -5074,7 +5075,7 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
 		return NULL;
 	if (PageAnon(page)) {
 		/* we don't move shared anon */
-		if (!move_anon() || page_mapcount(page) > 2)
+		if (!move_anon() || page_mapcount(page) > 1)
 			return NULL;
 	} else if (!move_file())
 		/* we ignore mapcount for file pages */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 06b145fb64ab..47296fee23db 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -640,10 +640,11 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
 	unsigned long vmstart;
 	unsigned long vmend;
 
-	vma = find_vma_prev(mm, start, &prev);
+	vma = find_vma(mm, start);
 	if (!vma || vma->vm_start > start)
 		return -EFAULT;
 
+	prev = vma->vm_prev;
 	if (start > vma->vm_start)
 		prev = vma;
 
diff --git a/mm/migrate.c b/mm/migrate.c
index df141f60289e..1503b6b54ecb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -839,8 +839,6 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	if (!newpage)
 		return -ENOMEM;
 
-	mem_cgroup_reset_owner(newpage);
-
 	if (page_count(page) == 1) {
 		/* page was freed from under us. So we are done. */
 		goto out;
diff --git a/mm/mlock.c b/mm/mlock.c
index 4f4f53bdc65d..ef726e8aa8e9 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -385,10 +385,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
 		return -EINVAL;
 	if (end == start)
 		return 0;
-	vma = find_vma_prev(current->mm, start, &prev);
+	vma = find_vma(current->mm, start);
 	if (!vma || vma->vm_start > start)
 		return -ENOMEM;
 
+	prev = vma->vm_prev;
 	if (start > vma->vm_start)
 		prev = vma;
 
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1266,8 +1266,9 @@ munmap_back:
 	vma->vm_pgoff = pgoff;
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
 
+	error = -EINVAL;	/* when rejecting VM_GROWSDOWN|VM_GROWSUP */
+
 	if (file) {
-		error = -EINVAL;
 		if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
 			goto free_vma;
 		if (vm_flags & VM_DENYWRITE) {
@@ -1293,6 +1294,8 @@ munmap_back:
 		pgoff = vma->vm_pgoff;
 		vm_flags = vma->vm_flags;
 	} else if (vm_flags & VM_SHARED) {
+		if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
+			goto free_vma;
 		error = shmem_zero_setup(vma);
 		if (error)
 			goto free_vma;
@@ -1605,7 +1608,6 @@ EXPORT_SYMBOL(find_vma);
 
 /*
  * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
- * Note: pprev is set to NULL when return value is NULL.
  */
 struct vm_area_struct *
 find_vma_prev(struct mm_struct *mm, unsigned long addr,
@@ -1614,7 +1616,16 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
 	struct vm_area_struct *vma;
 
 	vma = find_vma(mm, addr);
-	*pprev = vma ? vma->vm_prev : NULL;
+	if (vma) {
+		*pprev = vma->vm_prev;
+	} else {
+		struct rb_node *rb_node = mm->mm_rb.rb_node;
+		*pprev = NULL;
+		while (rb_node) {
+			*pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+			rb_node = rb_node->rb_right;
+		}
+	}
 	return vma;
 }
 
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 5a688a2756be..f437d054c3bf 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -262,10 +262,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
 
 	down_write(&current->mm->mmap_sem);
 
-	vma = find_vma_prev(current->mm, start, &prev);
+	vma = find_vma(current->mm, start);
 	error = -ENOMEM;
 	if (!vma)
 		goto out;
+	prev = vma->vm_prev;
 	if (unlikely(grows & PROT_GROWSDOWN)) {
 		if (vma->vm_start >= end)
 			goto out;
diff --git a/mm/nommu.c b/mm/nommu.c
index b982290fd962..f59e170fceb4 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -696,9 +696,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
 
+		mutex_lock(&mapping->i_mmap_mutex);
 		flush_dcache_mmap_lock(mapping);
 		vma_prio_tree_insert(vma, &mapping->i_mmap);
 		flush_dcache_mmap_unlock(mapping);
+		mutex_unlock(&mapping->i_mmap_mutex);
 	}
 
 	/* add the VMA to the tree */
@@ -760,9 +762,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
 
+		mutex_lock(&mapping->i_mmap_mutex);
 		flush_dcache_mmap_lock(mapping);
 		vma_prio_tree_remove(vma, &mapping->i_mmap);
 		flush_dcache_mmap_unlock(mapping);
+		mutex_unlock(&mapping->i_mmap_mutex);
 	}
 
 	/* remove from the MM's tree and list */
@@ -775,8 +779,6 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 
 	if (vma->vm_next)
 		vma->vm_next->vm_prev = vma->vm_prev;
-
-	vma->vm_mm = NULL;
 }
 
 /*
@@ -2052,6 +2054,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	down_write(&nommu_region_sem);
+	mutex_lock(&inode->i_mapping->i_mmap_mutex);
 
 	/* search for VMAs that fall within the dead zone */
 	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
@@ -2059,6 +2062,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 		/* found one - only interested if it's shared out of the page
 		 * cache */
 		if (vma->vm_flags & VM_SHARED) {
+			mutex_unlock(&inode->i_mapping->i_mmap_mutex);
 			up_write(&nommu_region_sem);
 			return -ETXTBSY; /* not quite true, but near enough */
 		}
@@ -2086,6 +2090,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 		}
 	}
 
+	mutex_unlock(&inode->i_mapping->i_mmap_mutex);
 	up_write(&nommu_region_sem);
 	return 0;
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d2186ecb36f7..a13ded1938f0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5236,6 +5236,7 @@ void *__init alloc_large_system_hash(const char *tablename,
 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
 		do_div(max, bucketsize);
 	}
+	max = min(max, 0x80000000ULL);
 
 	if (numentries > max)
 		numentries = max;
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index de1616aa9b1e..1ccbd714059c 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -379,13 +379,15 @@ static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
 	pgoff_t offset = swp_offset(ent);
 	struct swap_cgroup_ctrl *ctrl;
 	struct page *mappage;
+	struct swap_cgroup *sc;
 
 	ctrl = &swap_cgroup_ctrl[swp_type(ent)];
 	if (ctrlp)
 		*ctrlp = ctrl;
 
 	mappage = ctrl->map[offset / SC_PER_PAGE];
-	return page_address(mappage) + offset % SC_PER_PAGE;
+	sc = page_address(mappage);
+	return sc + offset % SC_PER_PAGE;
 }
 
 /**
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 12a48a88c0d8..405d331804c3 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -184,8 +184,7 @@ static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
 				   page_end - page_start);
 	}
 
-	for (i = page_start; i < page_end; i++)
-		__clear_bit(i, populated);
+	bitmap_clear(populated, page_start, page_end - page_start);
 }
 
 /**
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -652,7 +652,7 @@ EXPORT_SYMBOL(__pagevec_release);
 void lru_add_page_tail(struct zone* zone,
 		       struct page *page, struct page *page_tail)
 {
-	int active;
+	int uninitialized_var(active);
 	enum lru_list lru;
 	const int file = 0;
 
@@ -672,7 +672,6 @@ void lru_add_page_tail(struct zone* zone,
 			active = 0;
 			lru = LRU_INACTIVE_ANON;
 		}
-		update_page_reclaim_stat(zone, page_tail, file, active);
 	} else {
 		SetPageUnevictable(page_tail);
 		lru = LRU_UNEVICTABLE;
@@ -693,6 +692,9 @@ void lru_add_page_tail(struct zone* zone,
 		list_head = page_tail->lru.prev;
 		list_move_tail(&page_tail->lru, list_head);
 	}
+
+	if (!PageUnevictable(page))
+		update_page_reclaim_stat(zone, page_tail, file, active);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -710,8 +712,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
 	SetPageLRU(page);
 	if (active)
 		SetPageActive(page);
-	update_page_reclaim_stat(zone, page, file, active);
 	add_page_to_lru_list(zone, page, lru);
+	update_page_reclaim_stat(zone, page, file, active);
 }
 
 /*
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 470038a91873..ea6b32d61873 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -300,16 +300,6 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			new_page = alloc_page_vma(gfp_mask, vma, addr);
 			if (!new_page)
 				break;	/* Out of memory */
-			/*
-			 * The memcg-specific accounting when moving
-			 * pages around the LRU lists relies on the
-			 * page's owner (memcg) to be valid. Usually,
-			 * pages are assigned to a new owner before
-			 * being put on the LRU list, but since this
-			 * is not the case here, the stale owner from
-			 * a previous allocation cycle must be reset.
-			 */
-			mem_cgroup_reset_owner(new_page);
 		}
 
 		/*