author	Kautuk Consul <consul.kautuk@gmail.com>	2012-06-14 16:11:37 -0400
committer	Tony Luck <tony.luck@intel.com>	2012-06-14 16:11:37 -0400
commit	f28fa729149c8b39699f7995ce5fff34c5145a9d (patch)
tree	3dba7e7ef8b5eb76d456a0172706ec2162ef13d9 /arch/ia64/mm
parent	cfaf025112d3856637ff34a767ef785ef5cf2ca9 (diff)
[IA64] Port OOM changes to ia64_do_page_fault
Commit d065bd810b6deb67d4897a14bfe21f8eb526ba99
(mm: retry page fault when blocking on disk transfer) and
commit 37b23e0525d393d48a7d59f870b3bc061a30ccdb
(x86,mm: make pagefault killable)

The above commits introduced changes into the x86 pagefault handler
for making the page fault handler retryable as well as killable.

These changes reduce the mmap_sem hold time, which is crucial
during OOM killer invocation.

Port these changes to ia64.

Signed-off-by: Kautuk Consul <consul.kautuk@gmail.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--	arch/ia64/mm/fault.c	46
1 files changed, 34 insertions, 12 deletions
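
For orientation, the control flow this patch ports is sketched below. This is a minimal sketch, not a standalone function, and it assumes the mm interfaces of this kernel generation exactly as they appear in the diff (handle_mm_fault() taking a flags argument, FAULT_FLAG_ALLOW_RETRY/FAULT_FLAG_KILLABLE, fatal_signal_pending()); the is_write_fault predicate is a placeholder for this sketch only. The complete ia64 handler follows in the diff.

/*
 * Sketch: retryable/killable page-fault flow (same shape as the x86
 * handler the commit message references). Error paths and vma checks
 * are omitted; see the diff below for the real ia64_do_page_fault().
 */
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int fault;

	if (is_write_fault)		/* hypothetical predicate, sketch only */
		flags |= FAULT_FLAG_WRITE;
retry:
	down_read(&mm->mmap_sem);
	/* ... look up and validate the vma ... */
	fault = handle_mm_fault(mm, vma, address, flags);

	/* Killable: if a fatal signal arrived while blocked on the fault,
	 * return without touching mmap_sem; it was already dropped before
	 * VM_FAULT_RETRY was returned. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/* Accounting is done only while retry is still allowed,
		 * so a retried fault is not counted twice. */
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			/* Retryable: mmap_sem was released in
			 * __lock_page_or_retry(), so loop once more with
			 * FAULT_FLAG_ALLOW_RETRY cleared. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);

The net effect is that the handler no longer sleeps on page I/O while holding mmap_sem when a retry is possible, and a task hit by a fatal signal (e.g. one selected by the OOM killer) can leave the fault path promptly instead of waiting for the disk.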
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 02d29c2a132a..8443daf4f515 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -72,6 +72,10 @@ mapped_kernel_page_is_present (unsigned long address)
 	return pte_present(pte);
 }
 
+#	define VM_READ_BIT	0
+#	define VM_WRITE_BIT	1
+#	define VM_EXEC_BIT	2
+
 void __kprobes
 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
 {
@@ -81,6 +85,12 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	struct siginfo si;
 	unsigned long mask;
 	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
+		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
+
+	flags |= ((mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
 
 	/* mmap_sem is performance critical.... */
 	prefetchw(&mm->mmap_sem);
@@ -109,6 +119,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	if (notify_page_fault(regs, TRAP_BRKPT))
 		return;
 
+retry:
 	down_read(&mm->mmap_sem);
 
 	vma = find_vma_prev(mm, address, &prev_vma);
@@ -130,10 +141,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 
 	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */
 
-#	define VM_READ_BIT	0
-#	define VM_WRITE_BIT	1
-#	define VM_EXEC_BIT	2
-
 #	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
 	    || (1 << VM_EXEC_BIT) != VM_EXEC)
 #		error File is out of sync with <linux/mm.h>.  Please update.
@@ -142,9 +149,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
 		goto bad_area;
 
-	mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
-		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
-
 	if ((vma->vm_flags & mask) != mask)
 		goto bad_area;
 
@@ -153,7 +157,11 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We ran out of memory, or some other thing happened
@@ -168,10 +176,24 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 		}
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
-		current->maj_flt++;
-	else
-		current->min_flt++;
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			current->maj_flt++;
+		else
+			current->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			 /* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
+
 	up_read(&mm->mmap_sem);
 	return;
 