Diffstat (limited to 'mm/huge_memory.c')
 mm/huge_memory.c | 57 +++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 37 insertions(+), 20 deletions(-)
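This patch converts the THP fault, copy-on-write and collapse paths from the old mem_cgroup_charge_anon()/mem_cgroup_uncharge_page() calls to the split charge API: mem_cgroup_try_charge() reserves a charge up front, mem_cgroup_commit_charge() finalizes it once the page is rmapped, and mem_cgroup_cancel_charge() releases the reservation on every failure path. LRU insertion is now done explicitly via lru_cache_add_active_or_unevictable() after the rmap is established. A minimal sketch of the pattern the hunks below converge on, with install_mapping() as a hypothetical stand-in for the page-table setup:

	struct mem_cgroup *memcg;

	/* Reserve the memcg charge before touching page table state. */
	if (mem_cgroup_try_charge(page, mm, GFP_TRANSHUGE, &memcg))
		return VM_FAULT_OOM;

	if (install_mapping(mm, page)) {	/* hypothetical helper */
		/* Any failure after try_charge must cancel the reservation. */
		mem_cgroup_cancel_charge(page, memcg);
		return VM_FAULT_OOM;
	}

	/* Page is mapped: finalize the charge, then add it to the LRU. */
	page_add_new_anon_rmap(page, vma, haddr);
	mem_cgroup_commit_charge(page, memcg, false);	/* false: not on LRU yet */
	lru_cache_add_active_or_unevictable(page, vma);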
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 3630d577e987..d9a21d06b862 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -715,13 +715,20 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 					unsigned long haddr, pmd_t *pmd,
 					struct page *page)
 {
+	struct mem_cgroup *memcg;
 	pgtable_t pgtable;
 	spinlock_t *ptl;
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
+
+	if (mem_cgroup_try_charge(page, mm, GFP_TRANSHUGE, &memcg))
+		return VM_FAULT_OOM;
+
 	pgtable = pte_alloc_one(mm, haddr);
-	if (unlikely(!pgtable))
+	if (unlikely(!pgtable)) {
+		mem_cgroup_cancel_charge(page, memcg);
 		return VM_FAULT_OOM;
+	}
 
 	clear_huge_page(page, haddr, HPAGE_PMD_NR);
 	/*
@@ -734,7 +741,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 	ptl = pmd_lock(mm, pmd);
 	if (unlikely(!pmd_none(*pmd))) {
 		spin_unlock(ptl);
-		mem_cgroup_uncharge_page(page);
+		mem_cgroup_cancel_charge(page, memcg);
 		put_page(page);
 		pte_free(mm, pgtable);
 	} else {
@@ -742,6 +749,8 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 		entry = mk_huge_pmd(page, vma->vm_page_prot);
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 		page_add_new_anon_rmap(page, vma, haddr);
+		mem_cgroup_commit_charge(page, memcg, false);
+		lru_cache_add_active_or_unevictable(page, vma);
 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
 		set_pmd_at(mm, haddr, pmd, entry);
 		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
@@ -827,13 +836,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
 	}
-	if (unlikely(mem_cgroup_charge_anon(page, mm, GFP_TRANSHUGE))) {
-		put_page(page);
-		count_vm_event(THP_FAULT_FALLBACK);
-		return VM_FAULT_FALLBACK;
-	}
 	if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
-		mem_cgroup_uncharge_page(page);
 		put_page(page);
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
@@ -979,6 +982,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 					struct page *page,
 					unsigned long haddr)
 {
+	struct mem_cgroup *memcg;
 	spinlock_t *ptl;
 	pgtable_t pgtable;
 	pmd_t _pmd;
@@ -999,20 +1003,21 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 					       __GFP_OTHER_NODE,
 					       vma, address, page_to_nid(page));
 		if (unlikely(!pages[i] ||
-			     mem_cgroup_charge_anon(pages[i], mm,
-						       GFP_KERNEL))) {
+			     mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL,
+						   &memcg))) {
 			if (pages[i])
 				put_page(pages[i]);
-			mem_cgroup_uncharge_start();
 			while (--i >= 0) {
-				mem_cgroup_uncharge_page(pages[i]);
+				memcg = (void *)page_private(pages[i]);
+				set_page_private(pages[i], 0);
+				mem_cgroup_cancel_charge(pages[i], memcg);
 				put_page(pages[i]);
 			}
-			mem_cgroup_uncharge_end();
 			kfree(pages);
 			ret |= VM_FAULT_OOM;
 			goto out;
 		}
+		set_page_private(pages[i], (unsigned long)memcg);
 	}
 
 	for (i = 0; i < HPAGE_PMD_NR; i++) {
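The fallback path above charges the HPAGE_PMD_NR small pages one by one, so a per-page memcg pointer has to survive from try_charge to the eventual commit or cancel. The patch stashes it in page_private(), which is unused on these freshly allocated pages, and clears it when the charge is resolved. The idiom, condensed from the surrounding hunks:

	set_page_private(pages[i], (unsigned long)memcg);	/* stash at charge time */
	...
	memcg = (void *)page_private(pages[i]);			/* recover later */
	set_page_private(pages[i], 0);
	mem_cgroup_commit_charge(pages[i], memcg, false);	/* or cancel on error */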
@@ -1041,7 +1046,11 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 		pte_t *pte, entry;
 		entry = mk_pte(pages[i], vma->vm_page_prot);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		memcg = (void *)page_private(pages[i]);
+		set_page_private(pages[i], 0);
 		page_add_new_anon_rmap(pages[i], vma, haddr);
+		mem_cgroup_commit_charge(pages[i], memcg, false);
+		lru_cache_add_active_or_unevictable(pages[i], vma);
 		pte = pte_offset_map(&_pmd, haddr);
 		VM_BUG_ON(!pte_none(*pte));
 		set_pte_at(mm, haddr, pte, entry);
@@ -1065,12 +1074,12 @@ out:
 out_free_pages:
 	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-	mem_cgroup_uncharge_start();
 	for (i = 0; i < HPAGE_PMD_NR; i++) {
-		mem_cgroup_uncharge_page(pages[i]);
+		memcg = (void *)page_private(pages[i]);
+		set_page_private(pages[i], 0);
+		mem_cgroup_cancel_charge(pages[i], memcg);
 		put_page(pages[i]);
 	}
-	mem_cgroup_uncharge_end();
 	kfree(pages);
 	goto out;
 }
@@ -1081,6 +1090,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	int ret = 0;
 	struct page *page = NULL, *new_page;
+	struct mem_cgroup *memcg;
 	unsigned long haddr;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;	/* For mmu_notifiers */
@@ -1132,7 +1142,8 @@ alloc:
 		goto out;
 	}
 
-	if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_TRANSHUGE))) {
+	if (unlikely(mem_cgroup_try_charge(new_page, mm,
+					   GFP_TRANSHUGE, &memcg))) {
 		put_page(new_page);
 		if (page) {
 			split_huge_page(page);
@@ -1161,7 +1172,7 @@ alloc:
 		put_user_huge_page(page);
 	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
 		spin_unlock(ptl);
-		mem_cgroup_uncharge_page(new_page);
+		mem_cgroup_cancel_charge(new_page, memcg);
 		put_page(new_page);
 		goto out_mn;
 	} else {
@@ -1170,6 +1181,8 @@ alloc:
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 		pmdp_clear_flush(vma, haddr, pmd);
 		page_add_new_anon_rmap(new_page, vma, haddr);
+		mem_cgroup_commit_charge(new_page, memcg, false);
+		lru_cache_add_active_or_unevictable(new_page, vma);
 		set_pmd_at(mm, haddr, pmd, entry);
 		update_mmu_cache_pmd(vma, address, pmd);
 		if (!page) {
@@ -2413,6 +2426,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	spinlock_t *pmd_ptl, *pte_ptl;
 	int isolated;
 	unsigned long hstart, hend;
+	struct mem_cgroup *memcg;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;	/* For mmu_notifiers */
 
@@ -2423,7 +2437,8 @@ static void collapse_huge_page(struct mm_struct *mm,
 	if (!new_page)
 		return;
 
-	if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_TRANSHUGE)))
+	if (unlikely(mem_cgroup_try_charge(new_page, mm,
+					   GFP_TRANSHUGE, &memcg)))
 		return;
 
 	/*
@@ -2510,6 +2525,8 @@ static void collapse_huge_page(struct mm_struct *mm,
 	spin_lock(pmd_ptl);
 	BUG_ON(!pmd_none(*pmd));
 	page_add_new_anon_rmap(new_page, vma, address);
+	mem_cgroup_commit_charge(new_page, memcg, false);
+	lru_cache_add_active_or_unevictable(new_page, vma);
 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 	set_pmd_at(mm, address, pmd, _pmd);
 	update_mmu_cache_pmd(vma, address, pmd);
@@ -2523,7 +2540,7 @@ out_up_write:
 	return;
 
 out:
-	mem_cgroup_uncharge_page(new_page);
+	mem_cgroup_cancel_charge(new_page, memcg);
 	goto out_up_write;
 }
 
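Taken together, the four converted functions (__do_huge_pmd_anonymous_page(), do_huge_pmd_wp_page_fallback(), do_huge_pmd_wp_page() and collapse_huge_page()) follow one contract: each successful mem_cgroup_try_charge() is paired with exactly one mem_cgroup_commit_charge() on the success path or one mem_cgroup_cancel_charge() on each error path. That is also why do_huge_pmd_anonymous_page() can drop its separate charge/uncharge calls outright, since the helper now charges internally, and why the mem_cgroup_uncharge_start()/mem_cgroup_uncharge_end() batching brackets disappear: cancelling an uncommitted charge is a plain counter operation that needs no uncharge batching.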