Diffstat (limited to 'mm/memory.c')
 mm/memory.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index ca0003947115..1311f26497e6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -837,6 +837,8 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 					 */
 					make_migration_entry_read(&entry);
 					pte = swp_entry_to_pte(entry);
+					if (pte_swp_soft_dirty(*src_pte))
+						pte = pte_swp_mksoft_dirty(pte);
 					set_pte_at(src_mm, addr, src_pte, pte);
 				}
 			}
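
Note: the first hunk restores the soft-dirty bit after the pte is rebuilt from the migration entry, because swp_entry_to_pte() constructs a fresh pte from the entry alone and software-only pte bits are not part of the entry. A minimal userspace sketch of that pattern follows; the types, bit layout, and helpers here are hypothetical stand-ins for the kernel's (the real pte_t and swap-entry encodings are architecture-specific):

#include <stdint.h>
#include <stdio.h>

#define PTE_SWP_SOFT_DIRTY (1u << 7)	/* hypothetical software-only pte bit */

typedef uint32_t pte_t;
typedef uint32_t swp_entry_t;

/* The swap entry encodes only type/offset; software pte bits are not in it. */
static pte_t swp_entry_to_pte(swp_entry_t e) { return e << 8; }

static pte_t pte_swp_mksoft_dirty(pte_t p) { return p | PTE_SWP_SOFT_DIRTY; }
static int pte_swp_soft_dirty(pte_t p) { return p & PTE_SWP_SOFT_DIRTY; }

int main(void)
{
	swp_entry_t entry = 42;
	/* source pte: migration entry plus the soft-dirty software bit */
	pte_t src_pte = pte_swp_mksoft_dirty(swp_entry_to_pte(entry));

	/* rebuilding from the entry alone starts clean: soft-dirty is gone */
	pte_t pte = swp_entry_to_pte(entry);
	/* the fix: carry the bit over from the source pte explicitly */
	if (pte_swp_soft_dirty(src_pte))
		pte = pte_swp_mksoft_dirty(pte);

	printf("soft-dirty preserved: %s\n",
	       pte_swp_soft_dirty(pte) ? "yes" : "no");
	return 0;
}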
@@ -3863,15 +3865,21 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * space. Kernel faults are handled more gracefully.
 	 */
 	if (flags & FAULT_FLAG_USER)
-		mem_cgroup_enable_oom();
+		mem_cgroup_oom_enable();
 
 	ret = __handle_mm_fault(mm, vma, address, flags);
 
-	if (flags & FAULT_FLAG_USER)
-		mem_cgroup_disable_oom();
-
-	if (WARN_ON(task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)))
-		mem_cgroup_oom_synchronize();
+	if (flags & FAULT_FLAG_USER) {
+		mem_cgroup_oom_disable();
+		/*
+		 * The task may have entered a memcg OOM situation but
+		 * if the allocation error was handled gracefully (no
+		 * VM_FAULT_OOM), there is no need to kill anything.
+		 * Just clean up the OOM state peacefully.
+		 */
+		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
+			mem_cgroup_oom_synchronize(false);
+	}
 
 	return ret;
 }
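
Note: the second hunk brackets __handle_mm_fault() with mem_cgroup_oom_enable()/mem_cgroup_oom_disable() and, when the fault completed without VM_FAULT_OOM, clears any recorded memcg OOM state via mem_cgroup_oom_synchronize(false) rather than invoking the OOM killer. Below is a compilable userspace sketch of that control flow only; per-task state and helpers are modeled as plain flags and are illustrative, not the kernel implementation:

#include <stdbool.h>
#include <stdio.h>

#define FAULT_FLAG_USER	0x01
#define VM_FAULT_OOM	0x02

/* stand-in for the kernel's per-task state ("current") */
static struct { bool oom_enabled; bool in_memcg_oom; } current_task;

static void mem_cgroup_oom_enable(void)  { current_task.oom_enabled = true; }
static void mem_cgroup_oom_disable(void) { current_task.oom_enabled = false; }
static bool task_in_memcg_oom(void)      { return current_task.in_memcg_oom; }

/* handle == false: only clear the recorded OOM state, kill nothing */
static void mem_cgroup_oom_synchronize(bool handle)
{
	if (!handle)
		current_task.in_memcg_oom = false;
	/* the handle == true path (waiting/killing) is elided */
}

/* a charge failure with OOM handling armed records the situation ... */
static int __handle_mm_fault(void)
{
	if (current_task.oom_enabled)
		current_task.in_memcg_oom = true;
	return 0;	/* ... but the fault recovered: no VM_FAULT_OOM */
}

static int handle_mm_fault(unsigned int flags)
{
	int ret;

	if (flags & FAULT_FLAG_USER)
		mem_cgroup_oom_enable();

	ret = __handle_mm_fault();

	if (flags & FAULT_FLAG_USER) {
		mem_cgroup_oom_disable();
		/* graceful recovery: discard the OOM state, kill nothing */
		if (task_in_memcg_oom() && !(ret & VM_FAULT_OOM))
			mem_cgroup_oom_synchronize(false);
	}
	return ret;
}

int main(void)
{
	handle_mm_fault(FAULT_FLAG_USER);
	printf("leftover memcg OOM state: %s\n",
	       task_in_memcg_oom() ? "yes" : "no");
	return 0;
}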