 arch/ia64/mm/fault.c | 46 ++++++++++++++++++++++++++++++++--------------
 lib/Kconfig.debug    |  2 +-
 2 files changed, 35 insertions(+), 13 deletions(-)
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 02d29c2a132a..8443daf4f515 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -72,6 +72,10 @@ mapped_kernel_page_is_present (unsigned long address)
 	return pte_present(pte);
 }
 
+#	define VM_READ_BIT	0
+#	define VM_WRITE_BIT	1
+#	define VM_EXEC_BIT	2
+
 void __kprobes
 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
 {
@@ -81,6 +85,12 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	struct siginfo si;
 	unsigned long mask;
 	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
+		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
+
+	flags |= ((mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
 
 	/* mmap_sem is performance critical.... */
 	prefetchw(&mm->mmap_sem);
@@ -109,6 +119,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	if (notify_page_fault(regs, TRAP_BRKPT))
 		return;
 
+retry:
 	down_read(&mm->mmap_sem);
 
 	vma = find_vma_prev(mm, address, &prev_vma);
@@ -130,10 +141,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 
 	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */
 
-#	define VM_READ_BIT	0
-#	define VM_WRITE_BIT	1
-#	define VM_EXEC_BIT	2
-
 #	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
 	    || (1 << VM_EXEC_BIT) != VM_EXEC)
 #		error File is out of sync with <linux/mm.h>. Please update.
@@ -142,9 +149,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
 		goto bad_area;
 
-	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
-		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
-
 	if ((vma->vm_flags & mask) != mask)
 		goto bad_area;
 
@@ -153,7 +157,11 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We ran out of memory, or some other thing happened
@@ -168,10 +176,24 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 		}
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
-		current->maj_flt++;
-	else
-		current->min_flt++;
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			current->maj_flt++;
+		else
+			current->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
+
 	up_read(&mm->mmap_sem);
 	return;
 
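The shape of the change above is the usual two-pass fault-retry protocol: the first handle_mm_fault() call runs with FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE, so the fault path may drop mmap_sem while sleeping on a locked page (per the comment, in __lock_page_or_retry() in mm/filemap.c) and report VM_FAULT_RETRY; the handler then retakes the lock at the retry: label and tries again exactly once, with FAULT_FLAG_ALLOW_RETRY cleared so the second pass cannot loop. A minimal userspace sketch of that control flow, where every constant and helper is a hypothetical stand-in rather than the kernel's API:

/*
 * Self-contained sketch of the retry protocol above; hypothetical
 * stand-ins only, not kernel API.  Build: cc -o retry retry.c
 */
#include <stdio.h>

#define FAULT_FLAG_WRITE	0x01	/* fault was a write access */
#define FAULT_FLAG_ALLOW_RETRY	0x02	/* lock may be dropped; retry once */
#define FAULT_FLAG_KILLABLE	0x04	/* wait killably for the page */

#define VM_FAULT_MAJOR		0x01	/* I/O was needed to service it */
#define VM_FAULT_RETRY		0x02	/* lock was dropped; caller retries */

static int attempts;

/* Pretend the first attempt must drop the lock for I/O; the second wins. */
static unsigned int fake_handle_mm_fault(unsigned int flags)
{
	if ((flags & FAULT_FLAG_ALLOW_RETRY) && attempts++ == 0)
		return VM_FAULT_MAJOR | VM_FAULT_RETRY;
	return 0;
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	unsigned int fault;
	long maj_flt = 0, min_flt = 0;

retry:
	/* down_read(&mm->mmap_sem) happens here in the real handler. */
	fault = fake_handle_mm_fault(flags);

	/* In the kernel: a fatal signal during the killable wait returns
	 * VM_FAULT_RETRY with mmap_sem already released, and the handler
	 * simply returns. */

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			maj_flt++;
		else
			min_flt++;
		if (fault & VM_FAULT_RETRY) {
			/* The second pass runs without ALLOW_RETRY, so it
			 * holds the lock to completion: no livelock. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			goto retry;
		}
	}
	/* up_read(&mm->mmap_sem) happens here in the real handler. */

	printf("attempts=%d maj_flt=%ld min_flt=%ld\n",
	       attempts, maj_flt, min_flt);
	return 0;	/* prints: attempts=2 maj_flt=1 min_flt=0 */
}

Note that maj_flt/min_flt are bumped only while FAULT_FLAG_ALLOW_RETRY is still set, i.e. on the first pass, so a retried fault is accounted exactly once; this mirrors the pattern other architectures already use.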
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ff5bdee4716d..4a186508bf8b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -714,7 +714,7 @@ config STACKTRACE
 
 config DEBUG_STACK_USAGE
 	bool "Stack utilization instrumentation"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && !IA64 && !PARISC
 	help
 	  Enables the display of the minimum amount of free stack which each
 	  task has ever had available in the sysrq-T and sysrq-P debug output.
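The Kconfig hunk looks unrelated but is presumably defensive: DEBUG_STACK_USAGE estimates each task's minimum free stack by scanning the zero-initialized stack area from its low end for the first word ever written, which assumes a strictly downward-growing stack. That assumption fails on parisc, whose stack grows upward, and on ia64, where the register backing store grows up from the low end of the kernel stack, so both architectures are excluded. A self-contained sketch of the watermark idea (all names hypothetical, not the kernel's implementation):

/*
 * Watermark sketch: a zeroed "stack" is consumed from the top; the
 * number of still-zero words at the bottom is the free high-water mark.
 */
#include <stdio.h>
#include <string.h>

#define STACK_WORDS 64

static unsigned long stack_area[STACK_WORDS];	/* zero-initialized */

/* Scan up from the low end until the first word that was ever dirtied. */
static size_t words_never_used(const unsigned long *stack, size_t n)
{
	size_t i = 0;

	while (i < n && stack[i] == 0)
		i++;
	return i;
}

int main(void)
{
	/* Simulate a downward-growing stack that dirtied its top 20 words. */
	memset(&stack_area[STACK_WORDS - 20], 0xff,
	       20 * sizeof(unsigned long));

	/* Prints 44: correct only because growth was strictly downward. */
	printf("free watermark: %zu of %d words\n",
	       words_never_used(stack_area, STACK_WORDS), STACK_WORDS);
	return 0;
}

If the region is also consumed from the bottom (ia64's backing store) or exclusively from the bottom (parisc), the scan stops at the first dirtied low word and the reported watermark is meaningless.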