author     Martin Schwidefsky <schwidefsky@de.ibm.com>              2010-10-25 10:10:37 -0400
committer  Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>  2010-10-25 10:10:19 -0400
commit     1e54622e0403891b10f2105663e0f9dd595a1f17 (patch)
tree       4d16341d7a3d0f3c46fcc275560a9206bccac07f /arch/s390/mm
parent     84afdcee620b1640f2a145c07febae4ed68947f9 (diff)
[S390] cleanup lowcore access from program checks
Read all required fields for program checks from the lowcore in the
first level interrupt handler in entry[64].S. If the context that
caused the fault was enabled for interrupts we can now re-enable the
irqs in entry[64].S.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
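The practical effect on the C page-fault handlers is a change of calling convention: instead of each handler reading the translation exception code from the lowcore itself (and re-enabling interrupts on its own), the first-level program-check handler in entry[64].S now reads the lowcore fields and passes both the program interruption code and the translation exception code in as arguments. Below is a minimal stand-alone sketch of the before/after shape; the stub types, the `_old`/`_new` suffixes, and the printf bodies are illustrative only and are not the kernel's definitions.

```c
#include <stdio.h>

/* Stub types so the sketch compiles as plain C; the real pt_regs and
 * lowcore layouts are arch-specific and much larger. */
struct pt_regs { unsigned long psw_addr; };
struct lowcore { unsigned long trans_exc_code; };
static struct lowcore S390_lowcore;     /* stand-in for the per-cpu lowcore */

/* Old convention: the handler fetched trans_exc_code from the lowcore
 * itself; interrupts were re-enabled later, inside do_exception(). */
static void do_dat_exception_old(struct pt_regs *regs, long int_code)
{
        unsigned long trans_exc_code = S390_lowcore.trans_exc_code;

        (void)regs;
        printf("old: int_code=%#lx trans_exc_code=%#lx\n",
               (unsigned long)int_code, trans_exc_code);
}

/* New convention: entry[64].S reads the lowcore in the first-level
 * interrupt handler, re-enables irqs if the interrupted context had
 * them enabled, and hands both values over as parameters. */
static void do_dat_exception_new(struct pt_regs *regs, long pgm_int_code,
                                 unsigned long trans_exc_code)
{
        (void)regs;
        printf("new: pgm_int_code=%#lx trans_exc_code=%#lx\n",
               (unsigned long)pgm_int_code, trans_exc_code);
}

int main(void)
{
        struct pt_regs regs = { 0 };

        S390_lowcore.trans_exc_code = 0x80001004UL;     /* example value */
        do_dat_exception_old(&regs, 0x0004);
        do_dat_exception_new(&regs, 0x0004, 0x80001004UL);
        return 0;
}
```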
Diffstat (limited to 'arch/s390/mm')
-rw-r--r-- | arch/s390/mm/fault.c | 36
1 file changed, 14 insertions, 22 deletions
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index bae2c282221c..b6570069b127 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -333,12 +333,6 @@ static inline int do_exception(struct pt_regs *regs, int access,
                 goto out;
 
         address = trans_exc_code & __FAIL_ADDR_MASK;
-        /*
-         * When we get here, the fault happened in the current
-         * task's user address space, so we can switch on the
-         * interrupts again and then search the VMAs
-         */
-        local_irq_enable();
         perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
         down_read(&mm->mmap_sem);
 
@@ -397,20 +391,20 @@ out:
         return fault;
 }
 
-void __kprobes do_protection_exception(struct pt_regs *regs, long int_code)
+void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code,
+                                       unsigned long trans_exc_code)
 {
-        unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
         int fault;
 
         /* Protection exception is supressing, decrement psw address. */
-        regs->psw.addr -= (int_code >> 16);
+        regs->psw.addr -= (pgm_int_code >> 16);
         /*
          * Check for low-address protection. This needs to be treated
          * as a special case because the translation exception code
          * field is not guaranteed to contain valid data in this case.
          */
         if (unlikely(!(trans_exc_code & 4))) {
-                do_low_address(regs, int_code, trans_exc_code);
+                do_low_address(regs, pgm_int_code, trans_exc_code);
                 return;
         }
         fault = do_exception(regs, VM_WRITE, trans_exc_code);
@@ -418,9 +412,9 @@ void __kprobes do_protection_exception(struct pt_regs *regs, long int_code)
                 do_fault_error(regs, 4, trans_exc_code, fault);
 }
 
-void __kprobes do_dat_exception(struct pt_regs *regs, long int_code)
+void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
+                               unsigned long trans_exc_code)
 {
-        unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
         int access, fault;
 
         access = VM_READ | VM_EXEC | VM_WRITE;
@@ -431,21 +425,19 @@ void __kprobes do_dat_exception(struct pt_regs *regs, long int_code)
 #endif
         fault = do_exception(regs, access, trans_exc_code);
         if (unlikely(fault))
-                do_fault_error(regs, int_code & 255, trans_exc_code, fault);
+                do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);
 }
 
 #ifdef CONFIG_64BIT
-void __kprobes do_asce_exception(struct pt_regs *regs, long int_code)
+void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code,
+                                 unsigned long trans_exc_code)
 {
-        unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
         struct mm_struct *mm = current->mm;
         struct vm_area_struct *vma;
 
         if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
                 goto no_context;
 
-        local_irq_enable();
-
         down_read(&mm->mmap_sem);
         vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
         up_read(&mm->mmap_sem);
@@ -457,16 +449,16 @@ void __kprobes do_asce_exception(struct pt_regs *regs, long int_code)
 
         /* User mode accesses just cause a SIGSEGV */
         if (regs->psw.mask & PSW_MASK_PSTATE) {
-                do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
+                do_sigsegv(regs, pgm_int_code, SEGV_MAPERR, trans_exc_code);
                 return;
         }
 
 no_context:
-        do_no_context(regs, int_code, trans_exc_code);
+        do_no_context(regs, pgm_int_code, trans_exc_code);
 }
 #endif
 
-int __handle_fault(unsigned long uaddr, unsigned long int_code, int write_user)
+int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
 {
         struct pt_regs regs;
         int access, fault;
@@ -477,14 +469,14 @@ int __handle_fault(unsigned long uaddr, unsigned long int_code, int write_user)
         regs.psw.addr = (unsigned long) __builtin_return_address(0);
         regs.psw.addr |= PSW_ADDR_AMODE;
         uaddr &= PAGE_MASK;
-        access = write_user ? VM_WRITE : VM_READ;
+        access = write ? VM_WRITE : VM_READ;
         fault = do_exception(&regs, access, uaddr | 2);
         if (unlikely(fault)) {
                 if (fault & VM_FAULT_OOM) {
                         pagefault_out_of_memory();
                         fault = 0;
                 } else if (fault & VM_FAULT_SIGBUS)
-                        do_sigbus(&regs, int_code, uaddr);
+                        do_sigbus(&regs, pgm_int_code, uaddr);
         }
         return fault ? -EFAULT : 0;
 }
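As a reading aid (not part of the commit), the sketch below spells out how the handlers above slice the two values they now receive from entry[64].S: the upper 16 bits of pgm_int_code hold the length of the faulting instruction, which do_protection_exception() subtracts from the PSW address because the exception is suppressing; the low byte is what do_dat_exception() feeds to do_fault_error(); and trans_exc_code is split into a fault address (masked with __FAIL_ADDR_MASK) plus low status bits, of which the diff tests the value-4 bit to detect low-address protection. The mask value used here is a placeholder, not the kernel's __FAIL_ADDR_MASK definition, and the example values are made up.

```c
#include <stdio.h>

/* Placeholder: page-granular mask standing in for the kernel's
 * arch-specific __FAIL_ADDR_MASK. */
#define FAIL_ADDR_MASK  (~0xfffUL)

static void decode(unsigned long pgm_int_code, unsigned long trans_exc_code)
{
        /* instruction length; see regs->psw.addr -= (pgm_int_code >> 16) */
        unsigned long ilen = pgm_int_code >> 16;
        /* low byte passed to do_fault_error() in do_dat_exception() */
        unsigned long code = pgm_int_code & 255;
        /* fault address, as computed in do_exception() */
        unsigned long address = trans_exc_code & FAIL_ADDR_MASK;
        /* special case handled first in do_protection_exception() */
        int low_addr_prot = !(trans_exc_code & 4);

        printf("ilen=%lu code=%#lx address=%#lx low_addr_prot=%d\n",
               ilen, code, address, low_addr_prot);
}

int main(void)
{
        /* Example values only. */
        decode(0x00040004UL, 0x0000000080001004UL);
        return 0;
}
```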