author		Linus Torvalds <torvalds@linux-foundation.org>	2019-07-16 18:07:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-16 18:07:51 -0400
commit		3eb514866f20c5eb74637279774b6d73b855480a (patch)
tree		72506a3ee7caf658db86a63d1f21a483f8a5d6d6 /arch/arc/mm/fault.c
parent		c309b6f24222246c18a8b65d3950e6e755440865 (diff)
parent		24a20b0a443fd485852d51d08e98bbd9d212e0ec (diff)
Merge tag 'arc-5.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull ARC updates from Vineet Gupta:
- long-overdue rewrite of do_page_fault
- refactoring of entry/exit code to utilize the double load/store
  instructions (see the illustrative sketch after the commit list below)
- hsdk platform updates
* tag 'arc-5.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
ARC: [plat-hsdk]: Enable AXI DW DMAC in defconfig
ARC: [plat-hsdk]: enable DW SPI controller
ARC: hide unused function unw_hdr_alloc
ARC: [haps] Add Virtio support
ARCv2: entry: simplify return to Delay Slot via interrupt
ARC: entry: EV_Trap expects r10 (vs. r9) to have exception cause
ARCv2: entry: rewrite to enable use of double load/stores LDD/STD
ARCv2: entry: avoid a branch
ARCv2: entry: push out the Z flag unclobber from common EXCEPTION_PROLOGUE
ARCv2: entry: comments about hardware auto-save on taken interrupts
ARC: mm: do_page_fault refactor #8: release mmap_sem sooner
ARC: mm: do_page_fault refactor #7: fold the various error handling
ARC: mm: do_page_fault refactor #6: error handlers to use same pattern
ARC: mm: do_page_fault refactor #5: scoot no_context to end
ARC: mm: do_page_fault refactor #4: consolidate retry related logic
ARC: mm: do_page_fault refactor #3: tidyup vma access permission code
ARC: mm: do_page_fault refactor #2: remove short lived variable
ARC: mm: do_page_fault refactor #1: remove label @good_area
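For context on the "double load/store" item in the summary above: ARCv2 (ARC HS) provides the LDD/STD instructions, which move an even/odd register pair in a single 64-bit access, so the reworked entry/exit code can save or restore two pt_regs slots per instruction. The sketch below is illustrative only, assuming a helper macro named ST2 and CONFIG_ARC_HAS_LL64 as the gate; it is not a verbatim copy of the macros the series adds.

	/* Illustrative sketch (not the exact kernel helper): store an even/odd
	 * register pair with one STD when the core supports 64-bit load/store,
	 * otherwise fall back to two 32-bit stores.
	 */
	.macro ST2 e, o, off
#ifdef CONFIG_ARC_HAS_LL64
	std	\e, [sp, \off]		/* single 64-bit store of the e:o pair */
#else
	st	\e, [sp, \off]
	st	\o, [sp, \off + 4]
#endif
	.endm

	/* usage: save r10 and r11 in one go (PT_R10_OFF is a hypothetical offset) */
	ST2	r10, r11, PT_R10_OFF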
Diffstat (limited to 'arch/arc/mm/fault.c')
-rw-r--r--	arch/arc/mm/fault.c	185
1 file changed, 77 insertions(+), 108 deletions(-)
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 81e84426fe21..3861543b66a0 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -63,24 +63,19 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 	struct vm_area_struct *vma = NULL;
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
-	int si_code = SEGV_MAPERR;
-	int ret;
-	vm_fault_t fault;
-	int write = regs->ecr_cause & ECR_C_PROTV_STORE;  /* ST/EX */
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	int sig, si_code = SEGV_MAPERR;
+	unsigned int write = 0, exec = 0, mask;
+	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
+	unsigned int flags;			/* handle_mm_fault() input */
 
 	/*
-	 * We fault-in kernel-space virtual memory on-demand. The
-	 * 'reference' page table is init_mm.pgd.
-	 *
 	 * NOTE! We MUST NOT take any locks for this case. We may
 	 * be in an interrupt or a critical region, and should
 	 * only copy the information from the master page table,
 	 * nothing more.
 	 */
 	if (address >= VMALLOC_START && !user_mode(regs)) {
-		ret = handle_kernel_vaddr_fault(address);
-		if (unlikely(ret))
+		if (unlikely(handle_kernel_vaddr_fault(address)))
 			goto no_context;
 		else
 			return;
@@ -93,143 +88,117 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 	if (faulthandler_disabled() || !mm)
 		goto no_context;
 
+	if (regs->ecr_cause & ECR_C_PROTV_STORE)	/* ST/EX */
+		write = 1;
+	else if ((regs->ecr_vec == ECR_V_PROTV) &&
+	         (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
+		exec = 1;
+
+	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
+	if (write)
+		flags |= FAULT_FLAG_WRITE;
+
 retry:
 	down_read(&mm->mmap_sem);
+
 	vma = find_vma(mm, address);
 	if (!vma)
 		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
+	if (unlikely(address < vma->vm_start)) {
+		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
+			goto bad_area;
+	}
 
 	/*
-	 * Ok, we have a good vm_area for this memory access, so
-	 * we can handle it..
+	 * vm_area is good, now check permissions for this memory access
 	 */
-good_area:
-	si_code = SEGV_ACCERR;
-
-	/* Handle protection violation, execute on heap or stack */
-
-	if ((regs->ecr_vec == ECR_V_PROTV) &&
-	    (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
+	mask = VM_READ;
+	if (write)
+		mask = VM_WRITE;
+	if (exec)
+		mask = VM_EXEC;
+
+	if (!(vma->vm_flags & mask)) {
+		si_code = SEGV_ACCERR;
 		goto bad_area;
-
-	if (write) {
-		if (!(vma->vm_flags & VM_WRITE))
-			goto bad_area;
-		flags |= FAULT_FLAG_WRITE;
-	} else {
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-			goto bad_area;
 	}
 
-	/*
-	 * If for any reason at all we couldn't handle the fault,
-	 * make sure we exit gracefully rather than endlessly redo
-	 * the fault.
-	 */
 	fault = handle_mm_fault(vma, address, flags);
 
-	if (fatal_signal_pending(current)) {
+	/*
+	 * Fault retry nuances
+	 */
+	if (unlikely(fault & VM_FAULT_RETRY)) {
 
 		/*
-		 * if fault retry, mmap_sem already relinquished by core mm
-		 * so OK to return to user mode (with signal handled first)
+		 * If fault needs to be retried, handle any pending signals
+		 * first (by returning to user mode).
+		 * mmap_sem already relinquished by core mm for RETRY case
 		 */
-		if (fault & VM_FAULT_RETRY) {
+		if (fatal_signal_pending(current)) {
 			if (!user_mode(regs))
 				goto no_context;
 			return;
 		}
-	}
-
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-
-	if (likely(!(fault & VM_FAULT_ERROR))) {
+		/*
+		 * retry state machine
+		 */
 		if (flags & FAULT_FLAG_ALLOW_RETRY) {
-			/* To avoid updating stats twice for retry case */
-			if (fault & VM_FAULT_MAJOR) {
-				tsk->maj_flt++;
-				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-					      regs, address);
-			} else {
-				tsk->min_flt++;
-				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-					      regs, address);
-			}
-
-			if (fault & VM_FAULT_RETRY) {
-				flags &= ~FAULT_FLAG_ALLOW_RETRY;
-				flags |= FAULT_FLAG_TRIED;
-				goto retry;
-			}
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
+			goto retry;
 		}
-
-		/* Fault Handled Gracefully */
-		up_read(&mm->mmap_sem);
-		return;
 	}
 
-	if (fault & VM_FAULT_OOM)
-		goto out_of_memory;
-	else if (fault & VM_FAULT_SIGSEGV)
-		goto bad_area;
-	else if (fault & VM_FAULT_SIGBUS)
-		goto do_sigbus;
-
-	/* no man's land */
-	BUG();
+bad_area:
+	up_read(&mm->mmap_sem);
 
 	/*
-	 * Something tried to access memory that isn't in our memory map..
-	 * Fix it, but check if it's kernel or user first..
+	 * Major/minor page fault accounting
+	 * (in case of retry we only land here once)
 	 */
-bad_area:
-	up_read(&mm->mmap_sem);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-	/* User mode accesses just cause a SIGSEGV */
-	if (user_mode(regs)) {
-		tsk->thread.fault_address = address;
-		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
-		return;
-	}
+	if (likely(!(fault & VM_FAULT_ERROR))) {
+		if (fault & VM_FAULT_MAJOR) {
+			tsk->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+				      regs, address);
+		} else {
+			tsk->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+				      regs, address);
		}
 
-no_context:
-	/* Are we prepared to handle this kernel fault?
-	 *
-	 * (The kernel has valid exception-points in the source
-	 * when it accesses user-memory. When it fails in one
-	 * of those points, we find it in a table and do a jump
-	 * to some fixup code that loads an appropriate error
-	 * code)
-	 */
-	if (fixup_exception(regs))
+		/* Normal return path: fault Handled Gracefully */
 		return;
+	}
 
-	die("Oops", regs, address);
+	if (!user_mode(regs))
+		goto no_context;
 
-out_of_memory:
-	up_read(&mm->mmap_sem);
-
-	if (user_mode(regs)) {
+	if (fault & VM_FAULT_OOM) {
 		pagefault_out_of_memory();
 		return;
 	}
 
-	goto no_context;
+	if (fault & VM_FAULT_SIGBUS) {
+		sig = SIGBUS;
+		si_code = BUS_ADRERR;
+	}
+	else {
+		sig = SIGSEGV;
+	}
 
-do_sigbus:
-	up_read(&mm->mmap_sem);
+	tsk->thread.fault_address = address;
+	force_sig_fault(sig, si_code, (void __user *)address);
+	return;
 
-	if (!user_mode(regs))
-		goto no_context;
+no_context:
+	if (fixup_exception(regs))
+		return;
 
-	tsk->thread.fault_address = address;
-	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
+	die("Oops", regs, address);
 }