aboutsummaryrefslogtreecommitdiffstats
path: root/arch/avr32/mm/fault.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/avr32/mm/fault.c')
-rw-r--r--arch/avr32/mm/fault.c33
 1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index f7040a1e399f..b92e60958617 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -61,10 +61,10 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 	const struct exception_table_entry *fixup;
 	unsigned long address;
 	unsigned long page;
-	int writeaccess;
 	long signr;
 	int code;
 	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	if (notify_page_fault(regs, ecr))
 		return;
@@ -86,6 +86,7 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 
 	local_irq_enable();
 
+retry:
 	down_read(&mm->mmap_sem);
 
 	vma = find_vma(mm, address);
@@ -104,7 +105,6 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 	 */
 good_area:
 	code = SEGV_ACCERR;
-	writeaccess = 0;
 
 	switch (ecr) {
 	case ECR_PROTECTION_X:
@@ -121,7 +121,7 @@ good_area:
 	case ECR_TLB_MISS_W:
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
-		writeaccess = 1;
+		flags |= FAULT_FLAG_WRITE;
 		break;
 	default:
 		panic("Unhandled case %lu in do_page_fault!", ecr);
@@ -132,7 +132,11 @@ good_area:
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -140,10 +144,23 @@ good_area:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
-		tsk->maj_flt++;
-	else
-		tsk->min_flt++;
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			tsk->maj_flt++;
+		else
+			tsk->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would have
+			 * already released it in __lock_page_or_retry() in
+			 * mm/filemap.c.
+			 */
+			goto retry;
+		}
+	}
 
 	up_read(&mm->mmap_sem);
 	return;