author     Johannes Weiner <hannes@cmpxchg.org>            2013-09-12 18:13:42 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-09-12 18:38:01 -0400
commit     519e52473ebe9db5cdef44670d5a97f1fd53d721 (patch)
tree       635fce64ff3658250745b9c8dfebd47e981a5b16 /mm/memory.c
parent     3a13c4d761b4b979ba8767f42345fed3274991b0 (diff)
mm: memcg: enable memcg OOM killer only for user faults

System calls and kernel faults (uaccess, gup) can handle an out of
memory situation gracefully and just return -ENOMEM.

Enable the memcg OOM killer only for user faults, where it's really the
only option available.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: azurIt <azurit@pobox.sk>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
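[Editor's note] The mem_cgroup_enable_oom()/mem_cgroup_disable_oom() pair
called in the diff below works by flagging the current task; the memcg
charge path consults that flag before falling back to the OOM killer. A
minimal sketch of the helpers, assuming a per-task may_oom bit as added
elsewhere in this patch series (the exact struct layout is an assumption,
not quoted from the tree):

    /* Hedged sketch: per-task gate consulted by the memcg charge path.
     * Assumes task_struct carries something like
     *     struct memcg_oom_info { unsigned int may_oom:1; } memcg_oom;
     * as introduced elsewhere in this series. */
    static inline void mem_cgroup_enable_oom(void)
    {
            WARN_ON(current->memcg_oom.may_oom);
            current->memcg_oom.may_oom = 1;
    }

    static inline void mem_cgroup_disable_oom(void)
    {
            WARN_ON(!current->memcg_oom.may_oom);
            current->memcg_oom.may_oom = 0;
    }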
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  40
1 file changed, 30 insertions(+), 10 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 2b73dbde2274..a8f9deab8719 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3754,22 +3754,14 @@ unlock:
 /*
  * By the time we get here, we already hold the mm semaphore
  */
-int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-                   unsigned long address, unsigned int flags)
+static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+                            unsigned long address, unsigned int flags)
 {
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
 
-       __set_current_state(TASK_RUNNING);
-
-       count_vm_event(PGFAULT);
-       mem_cgroup_count_vm_event(mm, PGFAULT);
-
-       /* do counter updates before entering really critical section. */
-       check_sync_rss_stat(current);
-
        if (unlikely(is_vm_hugetlb_page(vma)))
                return hugetlb_fault(mm, vma, address, flags);
 
@@ -3850,6 +3842,34 @@ retry:
        return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 }
 
+int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+                   unsigned long address, unsigned int flags)
+{
+       int ret;
+
+       __set_current_state(TASK_RUNNING);
+
+       count_vm_event(PGFAULT);
+       mem_cgroup_count_vm_event(mm, PGFAULT);
+
+       /* do counter updates before entering really critical section. */
+       check_sync_rss_stat(current);
+
+       /*
+        * Enable the memcg OOM handling for faults triggered in user
+        * space.  Kernel faults are handled more gracefully.
+        */
+       if (flags & FAULT_FLAG_USER)
+               mem_cgroup_enable_oom();
+
+       ret = __handle_mm_fault(mm, vma, address, flags);
+
+       if (flags & FAULT_FLAG_USER)
+               mem_cgroup_disable_oom();
+
+       return ret;
+}
+
 #ifndef __PAGETABLE_PUD_FOLDED
 /*
  * Allocate page upper directory.
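[Editor's note] FAULT_FLAG_USER itself is set by the architecture
page-fault handlers when the faulting access originated in user mode
(wired up earlier in this series). A sketch of such a call site; the
handler fragment is illustrative, not any particular architecture's code:

    /* Illustrative arch fault-handler fragment: only user-mode faults
     * opt in to the memcg OOM killer. */
    unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
    int fault;

    if (user_mode(regs))
            flags |= FAULT_FLAG_USER;

    fault = handle_mm_fault(mm, vma, address, flags);
    /* A kernel-mode fault that hits OOM gets VM_FAULT_OOM back, and the
     * caller (uaccess fixup, gup) can fail with -ENOMEM instead of
     * invoking the memcg OOM killer. */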