path: root/arch/frv
author	Johannes Weiner <hannes@cmpxchg.org>	2013-09-12 18:13:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-12 18:38:01 -0400
commit	759496ba6407c6994d6a5ce3a5e74937d7816208 (patch)
tree	aeff8de8af36f70f2591114cef58c9ae7df25565 /arch/frv
parent	871341023c771ad233620b7a1fb3d9c7031c4e5c (diff)
arch: mm: pass userspace fault flag to generic fault handler
Unlike global OOM handling, memory cgroup code will invoke the OOM killer in any OOM situation because it has no way of telling faults occurring in kernel context - which could be handled more gracefully - from user-triggered faults.

Pass a flag that identifies faults originating in user space from the architecture-specific fault handlers to generic code so that memcg OOM handling can be improved.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: azurIt <azurit@pobox.sk>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
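The frv change below is one instance of the pattern applied to every architecture in this series: the fault handler notes whether the fault came from user mode and carries that in the flags argument of handle_mm_fault(). The payoff is on the consumer side, where memcg OOM killing can then be restricted to user-triggered faults. The following is a minimal sketch of that consumer side; only FAULT_FLAG_USER itself comes from this patch, and the enable/disable helper names are illustrative assumptions rather than code introduced here.

/*
 * Sketch: generic fault path gating memcg OOM killing on FAULT_FLAG_USER.
 * mem_cgroup_oom_enable()/mem_cgroup_oom_disable() are assumed helpers
 * for illustration, not part of this patch.
 */
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		    unsigned long address, unsigned int flags)
{
	int ret;

	/* Only faults from user space may trigger the memcg OOM killer;
	 * kernel-context faults can unwind more gracefully. */
	if (flags & FAULT_FLAG_USER)
		mem_cgroup_oom_enable();

	ret = __handle_mm_fault(mm, vma, address, flags);

	if (flags & FAULT_FLAG_USER)
		mem_cgroup_oom_disable();

	return ret;
}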
Diffstat (limited to 'arch/frv')
-rw-r--r--	arch/frv/mm/fault.c	10
1 file changed, 6 insertions, 4 deletions
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 331c1e2cfb67..9a66372fc7c7 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -34,11 +34,11 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	struct vm_area_struct *vma;
 	struct mm_struct *mm;
 	unsigned long _pme, lrai, lrad, fixup;
+	unsigned long flags = 0;
 	siginfo_t info;
 	pgd_t *pge;
 	pud_t *pue;
 	pte_t *pte;
-	int write;
 	int fault;
 
 #if 0
@@ -81,6 +81,9 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(__frame))
+		flags |= FAULT_FLAG_USER;
+
 	down_read(&mm->mmap_sem);
 
 	vma = find_vma(mm, ear0);
@@ -129,7 +132,6 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 */
  good_area:
 	info.si_code = SEGV_ACCERR;
-	write = 0;
 	switch (esr0 & ESR0_ATXC) {
 	default:
 		/* handle write to write protected page */
@@ -140,7 +142,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 #endif
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
-		write = 1;
+		flags |= FAULT_FLAG_WRITE;
 		break;
 
 	/* handle read from protected page */
@@ -162,7 +164,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, ear0, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, ear0, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;