Diffstat (limited to 'arch/s390/mm/fault.c')
-rw-r--r--	arch/s390/mm/fault.c	77
1 file changed, 46 insertions, 31 deletions
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 2505b2ea0ef1..fe5701e9efbf 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -52,6 +52,14 @@
 #define VM_FAULT_BADMAP		0x020000
 #define VM_FAULT_BADACCESS	0x040000
 
+static unsigned long store_indication;
+
+void fault_init(void)
+{
+	if (test_facility(2) && test_facility(75))
+		store_indication = 0xc00;
+}
+
 static inline int notify_page_fault(struct pt_regs *regs)
 {
 	int ret = 0;
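
Note on the hunk above: store_indication is left at zero unless both
facility bits test true, so on machines without the store-indication
facility the mask test added later in this patch can never match and
fault handling behaves exactly as before. A minimal sketch of that
degenerate behavior (the helper name is hypothetical, not part of the
patch):

	/* With store_indication == 0 the AND always yields 0, never
	 * 0x400, so only the caller-supplied access type can set
	 * FAULT_FLAG_WRITE. */
	static inline int fault_is_store(unsigned long trans_exc_code)
	{
		return (trans_exc_code & store_indication) == 0x400;
	}
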
@@ -199,14 +207,21 @@ static noinline void do_sigbus(struct pt_regs *regs, long int_code,
 			       unsigned long trans_exc_code)
 {
 	struct task_struct *tsk = current;
+	unsigned long address;
+	struct siginfo si;
 
 	/*
 	 * Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
 	 */
-	tsk->thread.prot_addr = trans_exc_code & __FAIL_ADDR_MASK;
+	address = trans_exc_code & __FAIL_ADDR_MASK;
+	tsk->thread.prot_addr = address;
 	tsk->thread.trap_no = int_code;
-	force_sig(SIGBUS, tsk);
+	si.si_signo = SIGBUS;
+	si.si_errno = 0;
+	si.si_code = BUS_ADRERR;
+	si.si_addr = (void __user *) address;
+	force_sig_info(SIGBUS, &si, tsk);
 }
 
 #ifdef CONFIG_S390_EXEC_PROTECT
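
With the change above, the SIGBUS delivered by do_sigbus() carries a
populated siginfo (BUS_ADRERR plus the faulting address) instead of the
bare force_sig(). A userspace handler registered with SA_SIGINFO can
then inspect si_addr; a minimal sketch, illustrative only and not part
of this patch:

	#include <signal.h>
	#include <unistd.h>

	static void bus_handler(int sig, siginfo_t *si, void *uctx)
	{
		/* si->si_addr now holds the faulting address; stay
		 * async-signal-safe, hence write() rather than printf(). */
		if (si->si_code == BUS_ADRERR)
			write(2, "SIGBUS\n", 7);
	}

	static void install_handler(void)
	{
		struct sigaction sa = { 0 };

		sa.sa_sigaction = bus_handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGBUS, &sa, NULL);
	}
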
@@ -266,10 +281,11 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code,
 		if (fault & VM_FAULT_OOM)
 			pagefault_out_of_memory();
 		else if (fault & VM_FAULT_SIGBUS) {
-			do_sigbus(regs, int_code, trans_exc_code);
 			/* Kernel mode? Handle exceptions or die */
 			if (!(regs->psw.mask & PSW_MASK_PSTATE))
 				do_no_context(regs, int_code, trans_exc_code);
+			else
+				do_sigbus(regs, int_code, trans_exc_code);
 		} else
 			BUG();
 		break;
@@ -294,7 +310,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	unsigned long address;
-	int fault;
+	int fault, write;
 
 	if (notify_page_fault(regs))
 		return 0;
@@ -312,12 +328,6 @@ static inline int do_exception(struct pt_regs *regs, int access,
 		goto out;
 
 	address = trans_exc_code & __FAIL_ADDR_MASK;
-	/*
-	 * When we get here, the fault happened in the current
-	 * task's user address space, so we can switch on the
-	 * interrupts again and then search the VMAs
-	 */
-	local_irq_enable();
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 	down_read(&mm->mmap_sem);
 
@@ -348,8 +358,10 @@ static inline int do_exception(struct pt_regs *regs, int access,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address,
-				(access == VM_WRITE) ? FAULT_FLAG_WRITE : 0);
+	write = (access == VM_WRITE ||
+		 (trans_exc_code & store_indication) == 0x400) ?
+		FAULT_FLAG_WRITE : 0;
+	fault = handle_mm_fault(mm, vma, address, write);
 	if (unlikely(fault & VM_FAULT_ERROR))
 		goto out_up;
 
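
The write-detection change above is the payoff of the new fault_init():
when the hardware reports (trans_exc_code & store_indication) == 0x400,
the failing access was a store, so handle_mm_fault() can map the page
writable on the first fault instead of mapping it read-only and taking
a second protection fault when the store is re-executed. The condition,
written out long-hand as a sketch (assuming, as the patch does, that
0x400 in the masked bits means "store"):

	int flags = 0;

	if (access == VM_WRITE)
		flags = FAULT_FLAG_WRITE;	/* caller knows it writes */
	else if ((trans_exc_code & store_indication) == 0x400)
		flags = FAULT_FLAG_WRITE;	/* hardware saw a store */
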
@@ -374,20 +386,20 @@ out:
 	return fault;
 }
 
-void __kprobes do_protection_exception(struct pt_regs *regs, long int_code)
+void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code,
+				       unsigned long trans_exc_code)
 {
-	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
 	int fault;
 
 	/* Protection exception is suppressing, decrement psw address. */
-	regs->psw.addr -= (int_code >> 16);
+	regs->psw.addr -= (pgm_int_code >> 16);
 	/*
 	 * Check for low-address protection. This needs to be treated
 	 * as a special case because the translation exception code
 	 * field is not guaranteed to contain valid data in this case.
 	 */
 	if (unlikely(!(trans_exc_code & 4))) {
-		do_low_address(regs, int_code, trans_exc_code);
+		do_low_address(regs, pgm_int_code, trans_exc_code);
 		return;
 	}
 	fault = do_exception(regs, VM_WRITE, trans_exc_code);
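
From this hunk on, the handlers take pgm_int_code and trans_exc_code as
arguments instead of re-reading S390_lowcore themselves. Lowcore fields
are overwritten by the next interrupt, so reading them from C code is
only safe while interrupts stay disabled; having the low-level entry
code capture the values and pass them in registers removes that
constraint, which would also explain why the explicit local_irq_enable()
calls disappear from do_exception() and do_asce_exception(). A pseudo-C
sketch of the entry-code side this assumes (the real work happens in
entry.S; get_pgm_int_code() is a hypothetical placeholder):

	/* Snapshot the volatile lowcore fields while interrupts are
	 * still off, then hand the copies to the C handler.  The upper
	 * half of pgm_int_code carries the instruction length that
	 * "regs->psw.addr -= (pgm_int_code >> 16)" rewinds by. */
	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
	long pgm_int_code = get_pgm_int_code();

	do_protection_exception(regs, pgm_int_code, trans_exc_code);
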
@@ -395,9 +407,9 @@ void __kprobes do_protection_exception(struct pt_regs *regs, long int_code)
 		do_fault_error(regs, 4, trans_exc_code, fault);
 }
 
-void __kprobes do_dat_exception(struct pt_regs *regs, long int_code)
+void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
+				unsigned long trans_exc_code)
 {
-	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
 	int access, fault;
 
 	access = VM_READ | VM_EXEC | VM_WRITE;
@@ -408,21 +420,19 @@ void __kprobes do_dat_exception(struct pt_regs *regs, long int_code)
 #endif
 	fault = do_exception(regs, access, trans_exc_code);
 	if (unlikely(fault))
-		do_fault_error(regs, int_code & 255, trans_exc_code, fault);
+		do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);
 }
 
 #ifdef CONFIG_64BIT
-void __kprobes do_asce_exception(struct pt_regs *regs, long int_code)
+void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code,
+				 unsigned long trans_exc_code)
 {
-	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 
 	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
 		goto no_context;
 
-	local_irq_enable();
-
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
 	up_read(&mm->mmap_sem);
@@ -434,16 +444,16 @@ void __kprobes do_asce_exception(struct pt_regs *regs, long int_code)
 
 	/* User mode accesses just cause a SIGSEGV */
 	if (regs->psw.mask & PSW_MASK_PSTATE) {
-		do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
+		do_sigsegv(regs, pgm_int_code, SEGV_MAPERR, trans_exc_code);
 		return;
 	}
 
 no_context:
-	do_no_context(regs, int_code, trans_exc_code);
+	do_no_context(regs, pgm_int_code, trans_exc_code);
 }
 #endif
 
-int __handle_fault(unsigned long uaddr, unsigned long int_code, int write_user)
+int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
 {
 	struct pt_regs regs;
 	int access, fault;
@@ -454,14 +464,14 @@ int __handle_fault(unsigned long uaddr, unsigned long int_code, int write_user)
 	regs.psw.addr = (unsigned long) __builtin_return_address(0);
 	regs.psw.addr |= PSW_ADDR_AMODE;
 	uaddr &= PAGE_MASK;
-	access = write_user ? VM_WRITE : VM_READ;
+	access = write ? VM_WRITE : VM_READ;
 	fault = do_exception(&regs, access, uaddr | 2);
 	if (unlikely(fault)) {
 		if (fault & VM_FAULT_OOM) {
 			pagefault_out_of_memory();
 			fault = 0;
 		} else if (fault & VM_FAULT_SIGBUS)
-			do_sigbus(&regs, int_code, uaddr);
+			do_sigbus(&regs, pgm_int_code, uaddr);
 	}
 	return fault ? -EFAULT : 0;
 }
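
__handle_fault() builds a synthetic pt_regs so do_exception() can
resolve a user-space fault taken from kernel context (the uaccess
paths); "uaddr | 2" tags the address with the user-space bit that
user_space_fault() expects a real translation exception code to carry.
A hypothetical caller, for illustration only:

	/* Retry loop in a software-uaccess path: if the user access
	 * fails, ask the fault handler to page the address in, then
	 * try again.  try_user_read() is a made-up placeholder. */
	static int read_user_word(unsigned long uaddr, unsigned long *dest)
	{
		while (try_user_read(dest, uaddr) != 0) {
			if (__handle_fault(uaddr, 0, 0))	/* 0 = read */
				return -EFAULT;
		}
		return 0;
	}
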
@@ -527,7 +537,8 @@ void pfault_fini(void)
 		: : "a" (&refbk), "m" (refbk) : "cc");
 }
 
-static void pfault_interrupt(__u16 int_code)
+static void pfault_interrupt(unsigned int ext_int_code,
+			     unsigned int param32, unsigned long param64)
 {
 	struct task_struct *tsk;
 	__u16 subcode;
@@ -538,14 +549,18 @@ static void pfault_interrupt(__u16 int_code)
 	 * in the 'cpu address' field associated with the
 	 * external interrupt.
 	 */
-	subcode = S390_lowcore.cpu_addr;
+	subcode = ext_int_code >> 16;
 	if ((subcode & 0xff00) != __SUBCODE_MASK)
 		return;
 
 	/*
 	 * Get the token (= address of the task structure of the affected task).
 	 */
-	tsk = *(struct task_struct **) __LC_PFAULT_INTPARM;
+#ifdef CONFIG_64BIT
+	tsk = *(struct task_struct **) param64;
+#else
+	tsk = *(struct task_struct **) param32;
+#endif
 
 	if (subcode & 0x0080) {
 		/* signal bit is set -> a page has been swapped in by VM */
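
The same lowcore-to-parameter conversion is applied on the external
interrupt side: the pfault subcode formerly read from
S390_lowcore.cpu_addr now arrives packed in the upper half of
ext_int_code, and the task token comes in through param32/param64
rather than __LC_PFAULT_INTPARM. The decoding, sketched as a helper
(name illustrative):

	static inline __u16 pfault_subcode(unsigned int ext_int_code)
	{
		/* upper 16 bits = the old S390_lowcore.cpu_addr field */
		return ext_int_code >> 16;
	}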