Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/fault.c   | 35
-rw-r--r--  arch/s390/mm/mmap.c    | 12
-rw-r--r--  arch/s390/mm/pgtable.c |  7
3 files changed, 34 insertions(+), 20 deletions(-)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 6a12d1bb6e09..6c013f544146 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -49,6 +49,7 @@
 #define VM_FAULT_BADCONTEXT	0x010000
 #define VM_FAULT_BADMAP		0x020000
 #define VM_FAULT_BADACCESS	0x040000
+#define VM_FAULT_SIGNAL		0x080000
 
 static unsigned long store_indication;
 
@@ -110,7 +111,7 @@ static inline int user_space_fault(unsigned long trans_exc_code)
 	if (trans_exc_code == 2)
 		/* Access via secondary space, set_fs setting decides */
 		return current->thread.mm_segment.ar4;
-	if (user_mode == HOME_SPACE_MODE)
+	if (addressing_mode == HOME_SPACE_MODE)
 		/* User space if the access has been done via home space. */
 		return trans_exc_code == 3;
 	/*
@@ -219,7 +220,7 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
 	case VM_FAULT_BADACCESS:
 	case VM_FAULT_BADMAP:
 		/* Bad memory access. Check if it is kernel or user space. */
-		if (regs->psw.mask & PSW_MASK_PSTATE) {
+		if (user_mode(regs)) {
 			/* User mode accesses just cause a SIGSEGV */
 			si_code = (fault == VM_FAULT_BADMAP) ?
 				SEGV_MAPERR : SEGV_ACCERR;
@@ -229,15 +230,19 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
 	case VM_FAULT_BADCONTEXT:
 		do_no_context(regs);
 		break;
+	case VM_FAULT_SIGNAL:
+		if (!user_mode(regs))
+			do_no_context(regs);
+		break;
 	default: /* fault & VM_FAULT_ERROR */
 		if (fault & VM_FAULT_OOM) {
-			if (!(regs->psw.mask & PSW_MASK_PSTATE))
+			if (!user_mode(regs))
 				do_no_context(regs);
 			else
 				pagefault_out_of_memory();
 		} else if (fault & VM_FAULT_SIGBUS) {
 			/* Kernel mode? Handle exceptions or die */
-			if (!(regs->psw.mask & PSW_MASK_PSTATE))
+			if (!user_mode(regs))
 				do_no_context(regs);
 			else
 				do_sigbus(regs);
@@ -286,7 +291,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
 
 	address = trans_exc_code & __FAIL_ADDR_MASK;
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-	flags = FAULT_FLAG_ALLOW_RETRY;
+	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
 		flags |= FAULT_FLAG_WRITE;
 	down_read(&mm->mmap_sem);
@@ -335,6 +340,11 @@ retry:
 	 * the fault.
 	 */
 	fault = handle_mm_fault(mm, vma, address, flags);
+	/* No reason to continue if interrupted by SIGKILL. */
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+		fault = VM_FAULT_SIGNAL;
+		goto out;
+	}
 	if (unlikely(fault & VM_FAULT_ERROR))
 		goto out_up;
 
@@ -426,7 +436,7 @@ void __kprobes do_asce_exception(struct pt_regs *regs)
 	}
 
 	/* User mode accesses just cause a SIGSEGV */
-	if (regs->psw.mask & PSW_MASK_PSTATE) {
+	if (user_mode(regs)) {
 		do_sigsegv(regs, SEGV_MAPERR);
 		return;
 	}
@@ -441,6 +451,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
 	struct pt_regs regs;
 	int access, fault;
 
+	/* Emulate a uaccess fault from kernel mode. */
 	regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
 	if (!irqs_disabled())
 		regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
@@ -450,12 +461,12 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
 	regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
 	access = write ? VM_WRITE : VM_READ;
 	fault = do_exception(&regs, access);
-	if (unlikely(fault)) {
-		if (fault & VM_FAULT_OOM)
-			return -EFAULT;
-		else if (fault & VM_FAULT_SIGBUS)
-			do_sigbus(&regs);
-	}
+	/*
+	 * The fault happened in kernel mode while performing a uaccess,
+	 * so all we need to do now is emulate a fixup in case "fault" is
+	 * not zero.
+	 * For the calling uaccess functions this always results in -EFAULT.
+	 */
 	return fault ? -EFAULT : 0;
 }
 
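Taken together, the fault.c hunks make page faults killable: handle_mm_fault() is called with FAULT_FLAG_KILLABLE and may then return VM_FAULT_RETRY early when SIGKILL arrives, and the new VM_FAULT_SIGNAL code routes that case through do_fault_error(), which calls do_no_context() only for kernel-mode faults since a dying user task needs no further action. A condensed sketch of the flow (sketch_killable_fault is a made-up name for illustration; the real code lives inside do_exception(), whose surrounding declarations the hunks only partially show):

/*
 * Condensed sketch, not the complete function: mm, vma and address come
 * from the enclosing do_exception() in the diff above.
 */
static int sketch_killable_fault(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long address, unsigned int flags)
{
	int fault;

	flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	fault = handle_mm_fault(mm, vma, address, flags);
	/* No reason to continue if interrupted by SIGKILL: report the
	 * new pseudo fault code instead of retrying. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return VM_FAULT_SIGNAL;
	return fault;
}

The open-coded PSW_MASK_PSTATE tests are replaced by user_mode() at the same time; see the note at the end of this page.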
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 573384256c5c..c59a5efa58b1 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -103,9 +103,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 
 int s390_mmap_check(unsigned long addr, unsigned long len)
 {
+	int rc;
+
 	if (!is_compat_task() &&
-	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
-		return crst_table_upgrade(current->mm, 1UL << 53);
+	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
+		rc = crst_table_upgrade(current->mm, 1UL << 53);
+		if (rc)
+			return rc;
+		update_mm(current->mm, current);
+	}
 	return 0;
 }
 
@@ -125,6 +131,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
 		rc = crst_table_upgrade(mm, 1UL << 53);
 		if (rc)
 			return (unsigned long) rc;
+		update_mm(mm, current);
 		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
 	}
 	return area;
@@ -147,6 +154,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
 		rc = crst_table_upgrade(mm, 1UL << 53);
 		if (rc)
 			return (unsigned long) rc;
+		update_mm(mm, current);
 		area = arch_get_unmapped_area_topdown(filp, addr, len,
 						      pgoff, flags);
 	}
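All three mmap.c hunks add the same step. Since update_mm() is removed from crst_table_upgrade() and crst_table_downgrade() in pgtable.c below, a caller that upgrades the page table must reload the ASCE (address space control element) itself, and must do so only on success. The pattern, as a minimal sketch (assuming, as the hunks suggest, that crst_table_upgrade() returns 0 on success and an error code otherwise):

	/* Raise the address space limit to 2^53 bytes ... */
	rc = crst_table_upgrade(mm, 1UL << 53);
	if (rc)
		return rc;	/* upgrade failed, keep the old ASCE */
	/* ... and only then point the task at the new top-level table. */
	update_mm(mm, current);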
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 1cab221077cc..18df31d1f2c9 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -85,7 +85,6 @@ repeat:
 		crst_table_free(mm, table);
 	if (mm->context.asce_limit < limit)
 		goto repeat;
-	update_mm(mm, current);
 	return 0;
 }
 
@@ -93,9 +92,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 {
 	pgd_t *pgd;
 
-	if (mm->context.asce_limit <= limit)
-		return;
-	__tlb_flush_mm(mm);
 	while (mm->context.asce_limit > limit) {
 		pgd = mm->pgd;
 		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -118,7 +114,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 		mm->task_size = mm->context.asce_limit;
 		crst_table_free(mm, (unsigned long *) pgd);
 	}
-	update_mm(mm, current);
 }
 #endif
 
@@ -801,7 +796,7 @@ int s390_enable_sie(void)
 	struct mm_struct *mm, *old_mm;
 
 	/* Do we have switched amode? If no, we cannot do sie */
-	if (user_mode == HOME_SPACE_MODE)
+	if (addressing_mode == HOME_SPACE_MODE)
 		return -EINVAL;
 
 	/* Do we have pgstes? if yes, we are done */
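The remaining rename, user_mode to addressing_mode, touches the global that records the kernel's address-space mode; the old name clashed with the user_mode() predicate on struct pt_regs that fault.c now uses in place of the open-coded PSW checks. Judging by those 1:1 replacements, the predicate presumably amounts to the following (an inferred definition, not part of this diff):

/* Inferred from the mechanical replacements in fault.c above. */
static inline int user_mode(struct pt_regs *regs)
{
	return (regs->psw.mask & PSW_MASK_PSTATE) != 0;
}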