diff options
Diffstat (limited to 'arch/s390/mm/fault.c')
| -rw-r--r-- | arch/s390/mm/fault.c | 35 |
1 file changed, 23 insertions, 12 deletions
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 6a12d1bb6e09..6c013f544146 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
| @@ -49,6 +49,7 @@ | |||
| 49 | #define VM_FAULT_BADCONTEXT 0x010000 | 49 | #define VM_FAULT_BADCONTEXT 0x010000 |
| 50 | #define VM_FAULT_BADMAP 0x020000 | 50 | #define VM_FAULT_BADMAP 0x020000 |
| 51 | #define VM_FAULT_BADACCESS 0x040000 | 51 | #define VM_FAULT_BADACCESS 0x040000 |
| 52 | #define VM_FAULT_SIGNAL 0x080000 | ||
| 52 | 53 | ||
| 53 | static unsigned long store_indication; | 54 | static unsigned long store_indication; |
| 54 | 55 | ||
| @@ -110,7 +111,7 @@ static inline int user_space_fault(unsigned long trans_exc_code) | |||
| 110 | if (trans_exc_code == 2) | 111 | if (trans_exc_code == 2) |
| 111 | /* Access via secondary space, set_fs setting decides */ | 112 | /* Access via secondary space, set_fs setting decides */ |
| 112 | return current->thread.mm_segment.ar4; | 113 | return current->thread.mm_segment.ar4; |
| 113 | if (user_mode == HOME_SPACE_MODE) | 114 | if (addressing_mode == HOME_SPACE_MODE) |
| 114 | /* User space if the access has been done via home space. */ | 115 | /* User space if the access has been done via home space. */ |
| 115 | return trans_exc_code == 3; | 116 | return trans_exc_code == 3; |
| 116 | /* | 117 | /* |
| @@ -219,7 +220,7 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault) | |||
| 219 | case VM_FAULT_BADACCESS: | 220 | case VM_FAULT_BADACCESS: |
| 220 | case VM_FAULT_BADMAP: | 221 | case VM_FAULT_BADMAP: |
| 221 | /* Bad memory access. Check if it is kernel or user space. */ | 222 | /* Bad memory access. Check if it is kernel or user space. */ |
| 222 | if (regs->psw.mask & PSW_MASK_PSTATE) { | 223 | if (user_mode(regs)) { |
| 223 | /* User mode accesses just cause a SIGSEGV */ | 224 | /* User mode accesses just cause a SIGSEGV */ |
| 224 | si_code = (fault == VM_FAULT_BADMAP) ? | 225 | si_code = (fault == VM_FAULT_BADMAP) ? |
| 225 | SEGV_MAPERR : SEGV_ACCERR; | 226 | SEGV_MAPERR : SEGV_ACCERR; |
| @@ -229,15 +230,19 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault) | |||
| 229 | case VM_FAULT_BADCONTEXT: | 230 | case VM_FAULT_BADCONTEXT: |
| 230 | do_no_context(regs); | 231 | do_no_context(regs); |
| 231 | break; | 232 | break; |
| 233 | case VM_FAULT_SIGNAL: | ||
| 234 | if (!user_mode(regs)) | ||
| 235 | do_no_context(regs); | ||
| 236 | break; | ||
| 232 | default: /* fault & VM_FAULT_ERROR */ | 237 | default: /* fault & VM_FAULT_ERROR */ |
| 233 | if (fault & VM_FAULT_OOM) { | 238 | if (fault & VM_FAULT_OOM) { |
| 234 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) | 239 | if (!user_mode(regs)) |
| 235 | do_no_context(regs); | 240 | do_no_context(regs); |
| 236 | else | 241 | else |
| 237 | pagefault_out_of_memory(); | 242 | pagefault_out_of_memory(); |
| 238 | } else if (fault & VM_FAULT_SIGBUS) { | 243 | } else if (fault & VM_FAULT_SIGBUS) { |
| 239 | /* Kernel mode? Handle exceptions or die */ | 244 | /* Kernel mode? Handle exceptions or die */ |
| 240 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) | 245 | if (!user_mode(regs)) |
| 241 | do_no_context(regs); | 246 | do_no_context(regs); |
| 242 | else | 247 | else |
| 243 | do_sigbus(regs); | 248 | do_sigbus(regs); |
| @@ -286,7 +291,7 @@ static inline int do_exception(struct pt_regs *regs, int access) | |||
| 286 | 291 | ||
| 287 | address = trans_exc_code & __FAIL_ADDR_MASK; | 292 | address = trans_exc_code & __FAIL_ADDR_MASK; |
| 288 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); | 293 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); |
| 289 | flags = FAULT_FLAG_ALLOW_RETRY; | 294 | flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
| 290 | if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) | 295 | if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) |
| 291 | flags |= FAULT_FLAG_WRITE; | 296 | flags |= FAULT_FLAG_WRITE; |
| 292 | down_read(&mm->mmap_sem); | 297 | down_read(&mm->mmap_sem); |
| @@ -335,6 +340,11 @@ retry: | |||
| 335 | * the fault. | 340 | * the fault. |
| 336 | */ | 341 | */ |
| 337 | fault = handle_mm_fault(mm, vma, address, flags); | 342 | fault = handle_mm_fault(mm, vma, address, flags); |
| 343 | /* No reason to continue if interrupted by SIGKILL. */ | ||
| 344 | if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) { | ||
| 345 | fault = VM_FAULT_SIGNAL; | ||
| 346 | goto out; | ||
| 347 | } | ||
| 338 | if (unlikely(fault & VM_FAULT_ERROR)) | 348 | if (unlikely(fault & VM_FAULT_ERROR)) |
| 339 | goto out_up; | 349 | goto out_up; |
| 340 | 350 | ||
| @@ -426,7 +436,7 @@ void __kprobes do_asce_exception(struct pt_regs *regs) | |||
| 426 | } | 436 | } |
| 427 | 437 | ||
| 428 | /* User mode accesses just cause a SIGSEGV */ | 438 | /* User mode accesses just cause a SIGSEGV */ |
| 429 | if (regs->psw.mask & PSW_MASK_PSTATE) { | 439 | if (user_mode(regs)) { |
| 430 | do_sigsegv(regs, SEGV_MAPERR); | 440 | do_sigsegv(regs, SEGV_MAPERR); |
| 431 | return; | 441 | return; |
| 432 | } | 442 | } |
| @@ -441,6 +451,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write) | |||
| 441 | struct pt_regs regs; | 451 | struct pt_regs regs; |
| 442 | int access, fault; | 452 | int access, fault; |
| 443 | 453 | ||
| 454 | /* Emulate a uaccess fault from kernel mode. */ | ||
| 444 | regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK; | 455 | regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK; |
| 445 | if (!irqs_disabled()) | 456 | if (!irqs_disabled()) |
| 446 | regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; | 457 | regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; |
| @@ -450,12 +461,12 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write) | |||
| 450 | regs.int_parm_long = (uaddr & PAGE_MASK) | 2; | 461 | regs.int_parm_long = (uaddr & PAGE_MASK) | 2; |
| 451 | access = write ? VM_WRITE : VM_READ; | 462 | access = write ? VM_WRITE : VM_READ; |
| 452 | fault = do_exception(®s, access); | 463 | fault = do_exception(®s, access); |
| 453 | if (unlikely(fault)) { | 464 | /* |
| 454 | if (fault & VM_FAULT_OOM) | 465 | * Since the fault happened in kernel mode while performing a uaccess |
| 455 | return -EFAULT; | 466 | * all we need to do now is emulate a fixup in case "fault" is not |
| 456 | else if (fault & VM_FAULT_SIGBUS) | 467 | * zero. |
| 457 | do_sigbus(®s); | 468 | * For the calling uaccess functions this results always in -EFAULT. |
| 458 | } | 469 | */ |
| 459 | return fault ? -EFAULT : 0; | 470 | return fault ? -EFAULT : 0; |
| 460 | } | 471 | } |
| 461 | 472 | ||
