Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r-- | mm/memcontrol.c | 194
1 file changed, 101 insertions, 93 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b34ef4a32a3b..c3f09b2dda5f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -14,6 +14,12 @@
  * Copyright (C) 2012 Parallels Inc. and Google Inc.
  * Authors: Glauber Costa and Suleiman Souhlal
  *
+ * Native page reclaim
+ * Charge lifetime sanitation
+ * Lockless page tracking & accounting
+ * Unified hierarchy configuration model
+ * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -1436,15 +1442,17 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 	struct mem_cgroup *iter;
 	unsigned int i;
 
-	if (!p)
-		return;
-
 	mutex_lock(&oom_info_lock);
 	rcu_read_lock();
 
-	pr_info("Task in ");
-	pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
-	pr_cont(" killed as a result of limit of ");
+	if (p) {
+		pr_info("Task in ");
+		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
+		pr_cont(" killed as a result of limit of ");
+	} else {
+		pr_info("Memory limit reached of cgroup ");
+	}
+
 	pr_cont_cgroup_path(memcg->css.cgroup);
 	pr_cont("\n");
 
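
With the early return on !p gone, mem_cgroup_print_oom_info can now also be called by paths that have no victim task and still identify the offending cgroup. Reassembled from the hunk above for readability, the post-patch flow at this point is:

	mutex_lock(&oom_info_lock);
	rcu_read_lock();

	if (p) {
		/* A victim task exists: name it and its cgroup. */
		pr_info("Task in ");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
		pr_cont(" killed as a result of limit of ");
	} else {
		/* No task (e.g. a panic-on-oom path): report only the limit hit. */
		pr_info("Memory limit reached of cgroup ");
	}

	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont("\n");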
@@ -1531,7 +1539,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
 		return;
 	}
 
-	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
+	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL, memcg);
 	totalpages = mem_cgroup_get_limit(memcg) ? : 1;
 	for_each_mem_cgroup_tree(iter, memcg) {
 		struct css_task_iter it;
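
Passing memcg as a new fifth argument implies a matching change to check_panic_on_oom on the oom_kill.c side, which is not part of this file. A hedged sketch of the assumed prototype, with the parameter's type, name, and position inferred only from the call site above:

	/* Assumed counterpart declaration (not shown in this diff): */
	void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
				int order, const nodemask_t *nodemask,
				struct mem_cgroup *memcg);

With the cgroup in hand, the panic path can reach the NULL-task branch of mem_cgroup_print_oom_info added in the previous hunk.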
@@ -2779,92 +2787,6 @@ void mem_cgroup_split_huge_fixup(struct page *head)
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-/**
- * mem_cgroup_move_account - move account of the page
- * @page: the page
- * @nr_pages: number of regular pages (>1 for huge pages)
- * @from: mem_cgroup which the page is moved from.
- * @to: mem_cgroup which the page is moved to. @from != @to.
- *
- * The caller must confirm following.
- * - page is not on LRU (isolate_page() is useful.)
- * - compound_lock is held when nr_pages > 1
- *
- * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
- * from old cgroup.
- */
-static int mem_cgroup_move_account(struct page *page,
-				   unsigned int nr_pages,
-				   struct mem_cgroup *from,
-				   struct mem_cgroup *to)
-{
-	unsigned long flags;
-	int ret;
-
-	VM_BUG_ON(from == to);
-	VM_BUG_ON_PAGE(PageLRU(page), page);
-	/*
-	 * The page is isolated from LRU. So, collapse function
-	 * will not handle this page. But page splitting can happen.
-	 * Do this check under compound_page_lock(). The caller should
-	 * hold it.
-	 */
-	ret = -EBUSY;
-	if (nr_pages > 1 && !PageTransHuge(page))
-		goto out;
-
-	/*
-	 * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup
-	 * of its source page while we change it: page migration takes
-	 * both pages off the LRU, but page cache replacement doesn't.
-	 */
-	if (!trylock_page(page))
-		goto out;
-
-	ret = -EINVAL;
-	if (page->mem_cgroup != from)
-		goto out_unlock;
-
-	spin_lock_irqsave(&from->move_lock, flags);
-
-	if (!PageAnon(page) && page_mapped(page)) {
-		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
-			       nr_pages);
-		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
-			       nr_pages);
-	}
-
-	if (PageWriteback(page)) {
-		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
-			       nr_pages);
-		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
-			       nr_pages);
-	}
-
-	/*
-	 * It is safe to change page->mem_cgroup here because the page
-	 * is referenced, charged, and isolated - we can't race with
-	 * uncharging, charging, migration, or LRU putback.
-	 */
-
-	/* caller should have done css_get */
-	page->mem_cgroup = to;
-	spin_unlock_irqrestore(&from->move_lock, flags);
-
-	ret = 0;
-
-	local_irq_disable();
-	mem_cgroup_charge_statistics(to, page, nr_pages);
-	memcg_check_events(to, page);
-	mem_cgroup_charge_statistics(from, page, -nr_pages);
-	memcg_check_events(from, page);
-	local_irq_enable();
-out_unlock:
-	unlock_page(page);
-out:
-	return ret;
-}
-
 #ifdef CONFIG_MEMCG_SWAP
 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
 				       bool charge)
@@ -4816,6 +4738,92 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
 	return page;
 }
 
+/**
+ * mem_cgroup_move_account - move account of the page
+ * @page: the page
+ * @nr_pages: number of regular pages (>1 for huge pages)
+ * @from: mem_cgroup which the page is moved from.
+ * @to: mem_cgroup which the page is moved to. @from != @to.
+ *
+ * The caller must confirm following.
+ * - page is not on LRU (isolate_page() is useful.)
+ * - compound_lock is held when nr_pages > 1
+ *
+ * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
+ * from old cgroup.
+ */
+static int mem_cgroup_move_account(struct page *page,
+				   unsigned int nr_pages,
+				   struct mem_cgroup *from,
+				   struct mem_cgroup *to)
+{
+	unsigned long flags;
+	int ret;
+
+	VM_BUG_ON(from == to);
+	VM_BUG_ON_PAGE(PageLRU(page), page);
+	/*
+	 * The page is isolated from LRU. So, collapse function
+	 * will not handle this page. But page splitting can happen.
+	 * Do this check under compound_page_lock(). The caller should
+	 * hold it.
+	 */
+	ret = -EBUSY;
+	if (nr_pages > 1 && !PageTransHuge(page))
+		goto out;
+
+	/*
+	 * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup
+	 * of its source page while we change it: page migration takes
+	 * both pages off the LRU, but page cache replacement doesn't.
+	 */
+	if (!trylock_page(page))
+		goto out;
+
+	ret = -EINVAL;
+	if (page->mem_cgroup != from)
+		goto out_unlock;
+
+	spin_lock_irqsave(&from->move_lock, flags);
+
+	if (!PageAnon(page) && page_mapped(page)) {
+		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
+			       nr_pages);
+		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
+			       nr_pages);
+	}
+
+	if (PageWriteback(page)) {
+		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
+			       nr_pages);
+		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
+			       nr_pages);
+	}
+
+	/*
+	 * It is safe to change page->mem_cgroup here because the page
+	 * is referenced, charged, and isolated - we can't race with
+	 * uncharging, charging, migration, or LRU putback.
+	 */
+
+	/* caller should have done css_get */
+	page->mem_cgroup = to;
+	spin_unlock_irqrestore(&from->move_lock, flags);
+
+	ret = 0;
+
+	local_irq_disable();
+	mem_cgroup_charge_statistics(to, page, nr_pages);
+	memcg_check_events(to, page);
+	mem_cgroup_charge_statistics(from, page, -nr_pages);
+	memcg_check_events(from, page);
+	local_irq_enable();
+out_unlock:
+	unlock_page(page);
+out:
+	return ret;
+}
+
 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
 		unsigned long addr, pte_t ptent, union mc_target *target)
 {
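
The two hunks above move mem_cgroup_move_account verbatim from its old location near the huge-page code down to the move-charge code that uses it; the function's contract is unchanged. For orientation, a hedged caller sketch derived from the kdoc requirements (the helpers and error handling are illustrative, not taken from this patch):

	/*
	 * Illustrative only: satisfy the documented preconditions for a
	 * single (non-huge) page, then move its accounting.
	 */
	if (isolate_lru_page(page))	/* kdoc: page must not be on the LRU */
		return -EBUSY;

	css_get(&to->css);		/* kdoc: "caller should have done css_get" */

	/* nr_pages == 1, so no compound_lock is needed here. */
	ret = mem_cgroup_move_account(page, 1, from, to);
	if (ret)
		css_put(&to->css);	/* move failed: drop the extra reference */

	putback_lru_page(page);		/* done either way: back onto the LRU */
	return ret;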