author     Martin Schwidefsky <schwidefsky@de.ibm.com>   2007-04-27 10:01:43 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2007-04-27 10:01:43 -0400
commit     10c1031f706bbe0690d84cdbccad15b11c6dc661 (patch)
tree       791a21c066ed667676422395b4145c53a1dc2c28 /arch/s390/mm
parent     c0007f1a65762eaf55633d403b380130ec60adad (diff)
[S390] Minor fault path optimization.
The minor fault path has grown a lot in terms of cycles. In particular
the kprobes hook is very costly. Optimize the path to save a couple of
cycles. If kprobes is enabled, more than 300 cycles can be avoided if
kprobe_running() is false.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/fault.c | 259
1 files changed, 141 insertions, 118 deletions
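The change described in the commit message boils down to a fast-path/slow-path split: the hook called on every fault stays inline and only tests whether a kprobe is currently active, while the costly notifier-chain walk moves into an out-of-line helper that the common case never reaches. As a rough standalone sketch of that pattern only (not the kernel code; the real implementation follows in the diff below), with invented stand-in names fake_kprobe_active and slow_notify_page_fault:

/*
 * Minimal sketch of the fast-path/slow-path split described above.
 * All names here are illustrative stand-ins, not kernel interfaces.
 */
#include <stdio.h>

#define NOTIFY_DONE 0
#define NOTIFY_STOP 1
#define unlikely(x) __builtin_expect(!!(x), 0)

static int fake_kprobe_active;	/* stands in for kprobe_running() */

/* Out-of-line slow path: models the atomic notifier chain walk. */
static int slow_notify_page_fault(unsigned long addr, long err)
{
	printf("notifier chain walked for addr=%#lx err=%ld\n", addr, err);
	return NOTIFY_DONE;
}

/* Inline fast path: one predictable branch when no probe is armed. */
static inline int notify_page_fault(unsigned long addr, long err)
{
	if (unlikely(fake_kprobe_active))
		return slow_notify_page_fault(addr, err);
	return NOTIFY_DONE;
}

int main(void)
{
	/* Common case: no kprobe armed, the notifier walk is skipped. */
	if (notify_page_fault(0x1000, 4) == NOTIFY_STOP)
		return 0;

	/* Rare case: a probe is armed, the slow path runs. */
	fake_kprobe_active = 1;
	notify_page_fault(0x2000, 4);
	return 0;
}

When no probe is armed, all that is left on the fault path is one test and a predicted-not-taken branch instead of a call into the notifier chain, which is where the saving quoted in the commit message comes from.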
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 7462aebd3eb6..8bc35183db59 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -63,21 +63,25 @@ int unregister_page_fault_notifier(struct notifier_block *nb)
 	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
 }
 
-static inline int notify_page_fault(enum die_val val, const char *str,
-			struct pt_regs *regs, long err, int trap, int sig)
+static int __kprobes __notify_page_fault(struct pt_regs *regs, long err)
 {
-	struct die_args args = {
-		.regs = regs,
-		.str = str,
-		.err = err,
-		.trapnr = trap,
-		.signr = sig
-	};
-	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+	struct die_args args = { .str = "page fault",
+				 .trapnr = 14,
+				 .signr = SIGSEGV };
+	args.regs = regs;
+	args.err = err;
+	return atomic_notifier_call_chain(&notify_page_fault_chain,
+					  DIE_PAGE_FAULT, &args);
+}
+
+static inline int notify_page_fault(struct pt_regs *regs, long err)
+{
+	if (unlikely(kprobe_running()))
+		return __notify_page_fault(regs, err);
+	return NOTIFY_DONE;
 }
 #else
-static inline int notify_page_fault(enum die_val val, const char *str,
-			struct pt_regs *regs, long err, int trap, int sig)
+static inline int notify_page_fault(struct pt_regs *regs, long err)
 {
 	return NOTIFY_DONE;
 }
@@ -170,6 +174,89 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
 	force_sig_info(SIGSEGV, &si, current);
 }
 
+static void do_no_context(struct pt_regs *regs, unsigned long error_code,
+			  unsigned long address)
+{
+	const struct exception_table_entry *fixup;
+
+	/* Are we prepared to handle this kernel fault? */
+	fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
+	if (fixup) {
+		regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
+		return;
+	}
+
+	/*
+	 * Oops. The kernel tried to access some bad page. We'll have to
+	 * terminate things with extreme prejudice.
+	 */
+	if (check_space(current) == 0)
+		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
+		       " at virtual kernel address %p\n", (void *)address);
+	else
+		printk(KERN_ALERT "Unable to handle kernel paging request"
+		       " at virtual user address %p\n", (void *)address);
+
+	die("Oops", regs, error_code);
+	do_exit(SIGKILL);
+}
+
+static void do_low_address(struct pt_regs *regs, unsigned long error_code)
+{
+	/* Low-address protection hit in kernel mode means
+	   NULL pointer write access in kernel mode. */
+	if (regs->psw.mask & PSW_MASK_PSTATE) {
+		/* Low-address protection hit in user mode 'cannot happen'. */
+		die ("Low-address protection", regs, error_code);
+		do_exit(SIGKILL);
+	}
+
+	do_no_context(regs, error_code, 0);
+}
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+static int do_out_of_memory(struct pt_regs *regs, unsigned long error_code,
+			    unsigned long address)
+{
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = tsk->mm;
+
+	up_read(&mm->mmap_sem);
+	if (is_init(tsk)) {
+		yield();
+		down_read(&mm->mmap_sem);
+		return 1;
+	}
+	printk("VM: killing process %s\n", tsk->comm);
+	if (regs->psw.mask & PSW_MASK_PSTATE)
+		do_exit(SIGKILL);
+	do_no_context(regs, error_code, address);
+	return 0;
+}
+
+static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
+		      unsigned long address)
+{
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = tsk->mm;
+
+	up_read(&mm->mmap_sem);
+	/*
+	 * Send a sigbus, regardless of whether we were in kernel
+	 * or user mode.
+	 */
+	tsk->thread.prot_addr = address;
+	tsk->thread.trap_no = error_code;
+	force_sig(SIGBUS, tsk);
+
+	/* Kernel mode? Handle exceptions or die */
+	if (!(regs->psw.mask & PSW_MASK_PSTATE))
+		do_no_context(regs, error_code, address);
+}
+
 #ifdef CONFIG_S390_EXEC_PROTECT
 extern long sys_sigreturn(struct pt_regs *regs);
 extern long sys_rt_sigreturn(struct pt_regs *regs);
@@ -253,49 +340,23 @@ out_fault:
  *   3b       Region third trans.  ->  Not present  (nullification)
  */
 static inline void
-do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
+do_exception(struct pt_regs *regs, unsigned long error_code, int write)
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
-	struct vm_area_struct * vma;
+	struct vm_area_struct *vma;
 	unsigned long address;
-	const struct exception_table_entry *fixup;
-	int si_code;
 	int space;
+	int si_code;
 
-	tsk = current;
-	mm = tsk->mm;
-
-	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
-					SIGSEGV) == NOTIFY_STOP)
+	if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
 		return;
 
-	/*
-	 * Check for low-address protection. This needs to be treated
-	 * as a special case because the translation exception code
-	 * field is not guaranteed to contain valid data in this case.
-	 */
-	if (is_protection && !(S390_lowcore.trans_exc_code & 4)) {
-
-		/* Low-address protection hit in kernel mode means
-		   NULL pointer write access in kernel mode. */
-		if (!(regs->psw.mask & PSW_MASK_PSTATE)) {
-			address = 0;
-			space = 0;
-			goto no_context;
-		}
-
-		/* Low-address protection hit in user mode 'cannot happen'. */
-		die ("Low-address protection", regs, error_code);
-		do_exit(SIGKILL);
-	}
+	tsk = current;
+	mm = tsk->mm;
 
-	/*
-	 * get the failing address
-	 * more specific the segment and page table portion of
-	 * the address
-	 */
-	address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
+	/* get the failing address and the affected space */
+	address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
 	space = check_space(tsk);
 
 	/*
@@ -313,7 +374,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 	 */
 	local_irq_enable();
 
- 	down_read(&mm->mmap_sem);
+	down_read(&mm->mmap_sem);
 
 	si_code = SEGV_MAPERR;
 	vma = find_vma(mm, address);
@@ -330,19 +391,19 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 			return;
 #endif
 
-        if (vma->vm_start <= address)
-                goto good_area;
-        if (!(vma->vm_flags & VM_GROWSDOWN))
-                goto bad_area;
-        if (expand_stack(vma, address))
-                goto bad_area;
+	if (vma->vm_start <= address)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+	if (expand_stack(vma, address))
+		goto bad_area;
 /*
  * Ok, we have a good vm_area for this memory access, so
  * we can handle it..
  */
 good_area:
 	si_code = SEGV_ACCERR;
-	if (!is_protection) {
+	if (!write) {
 		/* page not present, check vm flags */
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
 			goto bad_area;
@@ -357,7 +418,7 @@ survive:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, is_protection)) {
+	switch (handle_mm_fault(mm, vma, address, write)) {
 	case VM_FAULT_MINOR:
 		tsk->min_flt++;
 		break;
@@ -365,9 +426,12 @@ survive:
 		tsk->maj_flt++;
 		break;
 	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
+		do_sigbus(regs, error_code, address);
+		return;
 	case VM_FAULT_OOM:
-		goto out_of_memory;
+		if (do_out_of_memory(regs, error_code, address))
+			goto survive;
+		return;
 	default:
 		BUG();
 	}
@@ -385,75 +449,34 @@ survive:
  * Fix it, but check if it's kernel or user first..
  */
 bad_area:
-        up_read(&mm->mmap_sem);
+	up_read(&mm->mmap_sem);
 
-        /* User mode accesses just cause a SIGSEGV */
-        if (regs->psw.mask & PSW_MASK_PSTATE) {
-                tsk->thread.prot_addr = address;
-                tsk->thread.trap_no = error_code;
-                do_sigsegv(regs, error_code, si_code, address);
-                return;
-        }
+	/* User mode accesses just cause a SIGSEGV */
+	if (regs->psw.mask & PSW_MASK_PSTATE) {
+		tsk->thread.prot_addr = address;
+		tsk->thread.trap_no = error_code;
+		do_sigsegv(regs, error_code, si_code, address);
+		return;
+	}
 
 no_context:
-	/* Are we prepared to handle this kernel fault? */
-	fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
-	if (fixup) {
-		regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
-		return;
-	}
-
-	/*
-	 * Oops. The kernel tried to access some bad page. We'll have to
-	 * terminate things with extreme prejudice.
-	 */
-	if (space == 0)
-		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
-		       " at virtual kernel address %p\n", (void *)address);
-	else
-		printk(KERN_ALERT "Unable to handle kernel paging request"
-		       " at virtual user address %p\n", (void *)address);
-
-	die("Oops", regs, error_code);
-	do_exit(SIGKILL);
-
-
-	/*
-	 * We ran out of memory, or some other thing happened to us that made
-	 * us unable to handle the page fault gracefully.
-	 */
-out_of_memory:
-	up_read(&mm->mmap_sem);
-	if (is_init(tsk)) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
-	printk("VM: killing process %s\n", tsk->comm);
-	if (regs->psw.mask & PSW_MASK_PSTATE)
-		do_exit(SIGKILL);
-	goto no_context;
-
-do_sigbus:
-	up_read(&mm->mmap_sem);
-
-	/*
-	 * Send a sigbus, regardless of whether we were in kernel
-	 * or user mode.
-	 */
-	tsk->thread.prot_addr = address;
-	tsk->thread.trap_no = error_code;
-	force_sig(SIGBUS, tsk);
-
-	/* Kernel mode? Handle exceptions or die */
-	if (!(regs->psw.mask & PSW_MASK_PSTATE))
-		goto no_context;
+	do_no_context(regs, error_code, address);
 }
 
 void __kprobes do_protection_exception(struct pt_regs *regs,
 				       unsigned long error_code)
 {
+	/* Protection exception is supressing, decrement psw address. */
 	regs->psw.addr -= (error_code >> 16);
+	/*
+	 * Check for low-address protection. This needs to be treated
+	 * as a special case because the translation exception code
+	 * field is not guaranteed to contain valid data in this case.
+	 */
+	if (unlikely(!(S390_lowcore.trans_exc_code & 4))) {
+		do_low_address(regs, error_code);
+		return;
+	}
 	do_exception(regs, 4, 1);
 }
 