Diffstat (limited to 'arch/s390/mm/fault.c')
 -rw-r--r--  arch/s390/mm/fault.c | 378
 1 file changed, 191 insertions(+), 187 deletions(-)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 6d507462967a..fc102e70d9c2 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -34,16 +34,15 @@
 #include <asm/pgtable.h>
 #include <asm/s390_ext.h>
 #include <asm/mmu_context.h>
+#include <asm/compat.h>
 #include "../kernel/entry.h"
 
 #ifndef CONFIG_64BIT
 #define __FAIL_ADDR_MASK 0x7ffff000
-#define __FIXUP_MASK 0x7fffffff
 #define __SUBCODE_MASK 0x0200
 #define __PF_RES_FIELD 0ULL
 #else /* CONFIG_64BIT */
 #define __FAIL_ADDR_MASK -4096L
-#define __FIXUP_MASK ~0L
 #define __SUBCODE_MASK 0x0600
 #define __PF_RES_FIELD 0x8000000000000000ULL
 #endif /* CONFIG_64BIT */
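
Review note: both __FIXUP_MASK definitions can go because do_no_context() (hunk at -161 below) now masks the PSW address with the generic PSW_ADDR_INSN constant instead. A minimal sketch of the <asm/ptrace.h> values this relies on — the exact constants are assumed from the s390 headers of this era, not shown by the patch:

/* Sketch only: assumed s390 PSW address masks, not patch text. */
#ifndef CONFIG_64BIT
#define PSW_ADDR_INSN 0x7fffffffUL          /* strip the 31-bit addressing-mode bit */
#else
#define PSW_ADDR_INSN 0xffffffffffffffffUL  /* all address bits are valid */
#endif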
@@ -52,11 +51,15 @@
 extern int sysctl_userprocess_debug;
 #endif
 
-#ifdef CONFIG_KPROBES
-static inline int notify_page_fault(struct pt_regs *regs, long err)
+#define VM_FAULT_BADCONTEXT 0x010000
+#define VM_FAULT_BADMAP 0x020000
+#define VM_FAULT_BADACCESS 0x040000
+
+static inline int notify_page_fault(struct pt_regs *regs)
 {
 	int ret = 0;
 
+#ifdef CONFIG_KPROBES
 	/* kprobe_running() needs smp_processor_id() */
 	if (!user_mode(regs)) {
 		preempt_disable();
@@ -64,15 +67,9 @@ static inline int notify_page_fault(struct pt_regs *regs, long err)
 			ret = 1;
 		preempt_enable();
 	}
-
+#endif
 	return ret;
 }
-#else
-static inline int notify_page_fault(struct pt_regs *regs, long err)
-{
-	return 0;
-}
-#endif
 
 
 /*
@@ -100,57 +97,50 @@ void bust_spinlocks(int yes)
 
 /*
  * Returns the address space associated with the fault.
- * Returns 0 for kernel space, 1 for user space and
- * 2 for code execution in user space with noexec=on.
+ * Returns 0 for kernel space and 1 for user space.
  */
-static inline int check_space(struct task_struct *tsk)
+static inline int user_space_fault(unsigned long trans_exc_code)
 {
 	/*
-	 * The lowest two bits of S390_lowcore.trans_exc_code
-	 * indicate which paging table was used.
+	 * The lowest two bits of the translation exception
+	 * identification indicate which paging table was used.
 	 */
-	int desc = S390_lowcore.trans_exc_code & 3;
-
-	if (desc == 3)	/* Home Segment Table Descriptor */
-		return switch_amode == 0;
-	if (desc == 2)	/* Secondary Segment Table Descriptor */
-		return tsk->thread.mm_segment.ar4;
-#ifdef CONFIG_S390_SWITCH_AMODE
-	if (unlikely(desc == 1)) { /* STD determined via access register */
-		/* %a0 always indicates primary space. */
-		if (S390_lowcore.exc_access_id != 0) {
-			save_access_regs(tsk->thread.acrs);
-			/*
-			 * An alet of 0 indicates primary space.
-			 * An alet of 1 indicates secondary space.
-			 * Any other alet values generate an
-			 * alen-translation exception.
-			 */
-			if (tsk->thread.acrs[S390_lowcore.exc_access_id])
-				return tsk->thread.mm_segment.ar4;
-		}
-	}
-#endif
-	/* Primary Segment Table Descriptor */
-	return switch_amode << s390_noexec;
+	trans_exc_code &= 3;
+	if (trans_exc_code == 2)
+		/* Access via secondary space, set_fs setting decides */
+		return current->thread.mm_segment.ar4;
+	if (user_mode == HOME_SPACE_MODE)
+		/* User space if the access has been done via home space. */
+		return trans_exc_code == 3;
+	/*
+	 * If the user space is not the home space the kernel runs in home
+	 * space. Access via secondary space has already been covered,
+	 * access via primary space or access register is from user space
+	 * and access via home space is from the kernel.
+	 */
+	return trans_exc_code != 3;
 }
 
 /*
  * Send SIGSEGV to task. This is an external routine
  * to keep the stack usage of do_page_fault small.
  */
-static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
-		       int si_code, unsigned long address)
+static noinline void do_sigsegv(struct pt_regs *regs, long int_code,
+				int si_code, unsigned long trans_exc_code)
 {
 	struct siginfo si;
+	unsigned long address;
 
+	address = trans_exc_code & __FAIL_ADDR_MASK;
+	current->thread.prot_addr = address;
+	current->thread.trap_no = int_code;
 #if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
 #if defined(CONFIG_SYSCTL)
 	if (sysctl_userprocess_debug)
 #endif
 	{
 		printk("User process fault: interruption code 0x%lX\n",
-		       error_code);
+		       int_code);
 		printk("failing address: %lX\n", address);
 		show_regs(regs);
 	}
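
Review note: the new user_space_fault() replaces check_space() and no longer consults access registers. An equivalent reading of the decision, written as a switch purely for review (user_mode here is the s390 address-space-mode variable used by the patch, not the user_mode(regs) macro):

/* Sketch, not patch text. Low two bits of the translation exception
 * identification: 0 = primary, 1 = access register, 2 = secondary,
 * 3 = home space. */
static inline int user_space_fault_sketch(unsigned long trans_exc_code)
{
	switch (trans_exc_code & 3) {
	case 2:	/* secondary space: the current set_fs() setting decides */
		return current->thread.mm_segment.ar4;
	case 3:	/* home space: user space only if user runs in home space */
		return user_mode == HOME_SPACE_MODE;
	default: /* primary space or access-register mode: user space,
		  * unless user space itself runs in home space */
		return user_mode != HOME_SPACE_MODE;
	}
}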
@@ -161,13 +151,14 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
 	force_sig_info(SIGSEGV, &si, current);
 }
 
-static void do_no_context(struct pt_regs *regs, unsigned long error_code,
-			  unsigned long address)
+static noinline void do_no_context(struct pt_regs *regs, long int_code,
+				   unsigned long trans_exc_code)
 {
 	const struct exception_table_entry *fixup;
+	unsigned long address;
 
 	/* Are we prepared to handle this kernel fault? */
-	fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
+	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
 	if (fixup) {
 		regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
 		return;
@@ -177,129 +168,149 @@ static void do_no_context(struct pt_regs *regs, unsigned long error_code,
 	 * Oops. The kernel tried to access some bad page. We'll have to
 	 * terminate things with extreme prejudice.
 	 */
-	if (check_space(current) == 0)
+	address = trans_exc_code & __FAIL_ADDR_MASK;
+	if (!user_space_fault(trans_exc_code))
 		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
 		       " at virtual kernel address %p\n", (void *)address);
 	else
 		printk(KERN_ALERT "Unable to handle kernel paging request"
 		       " at virtual user address %p\n", (void *)address);
 
-	die("Oops", regs, error_code);
+	die("Oops", regs, int_code);
 	do_exit(SIGKILL);
 }
 
-static void do_low_address(struct pt_regs *regs, unsigned long error_code)
+static noinline void do_low_address(struct pt_regs *regs, long int_code,
+				    unsigned long trans_exc_code)
 {
 	/* Low-address protection hit in kernel mode means
 	   NULL pointer write access in kernel mode. */
 	if (regs->psw.mask & PSW_MASK_PSTATE) {
 		/* Low-address protection hit in user mode 'cannot happen'. */
-		die ("Low-address protection", regs, error_code);
+		die ("Low-address protection", regs, int_code);
 		do_exit(SIGKILL);
 	}
 
-	do_no_context(regs, error_code, 0);
+	do_no_context(regs, int_code, trans_exc_code);
 }
 
-static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
-		      unsigned long address)
+static noinline void do_sigbus(struct pt_regs *regs, long int_code,
+			       unsigned long trans_exc_code)
 {
 	struct task_struct *tsk = current;
-	struct mm_struct *mm = tsk->mm;
 
-	up_read(&mm->mmap_sem);
 	/*
 	 * Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
 	 */
-	tsk->thread.prot_addr = address;
-	tsk->thread.trap_no = error_code;
+	tsk->thread.prot_addr = trans_exc_code & __FAIL_ADDR_MASK;
+	tsk->thread.trap_no = int_code;
 	force_sig(SIGBUS, tsk);
-
-	/* Kernel mode? Handle exceptions or die */
-	if (!(regs->psw.mask & PSW_MASK_PSTATE))
-		do_no_context(regs, error_code, address);
 }
 
 #ifdef CONFIG_S390_EXEC_PROTECT
-static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
-			 unsigned long address, unsigned long error_code)
+static noinline int signal_return(struct pt_regs *regs, long int_code,
+				  unsigned long trans_exc_code)
 {
 	u16 instruction;
 	int rc;
-#ifdef CONFIG_COMPAT
-	int compat;
-#endif
 
-	pagefault_disable();
 	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
-	pagefault_enable();
-	if (rc)
-		return -EFAULT;
 
-	up_read(&mm->mmap_sem);
-	clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
-#ifdef CONFIG_COMPAT
-	compat = is_compat_task();
-	if (compat && instruction == 0x0a77)
-		sys32_sigreturn();
-	else if (compat && instruction == 0x0aad)
-		sys32_rt_sigreturn();
-	else
-#endif
-	if (instruction == 0x0a77)
-		sys_sigreturn();
-	else if (instruction == 0x0aad)
-		sys_rt_sigreturn();
-	else {
-		current->thread.prot_addr = address;
-		current->thread.trap_no = error_code;
-		do_sigsegv(regs, error_code, SEGV_MAPERR, address);
-	}
+	if (!rc && instruction == 0x0a77) {
+		clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+		if (is_compat_task())
+			sys32_sigreturn();
+		else
+			sys_sigreturn();
+	} else if (!rc && instruction == 0x0aad) {
+		clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+		if (is_compat_task())
+			sys32_rt_sigreturn();
+		else
+			sys_rt_sigreturn();
+	} else
+		do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
 	return 0;
 }
 #endif /* CONFIG_S390_EXEC_PROTECT */
 
+static noinline void do_fault_error(struct pt_regs *regs, long int_code,
+				    unsigned long trans_exc_code, int fault)
+{
+	int si_code;
+
+	switch (fault) {
+	case VM_FAULT_BADACCESS:
+#ifdef CONFIG_S390_EXEC_PROTECT
+		if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
+		    (trans_exc_code & 3) == 0) {
+			signal_return(regs, int_code, trans_exc_code);
+			break;
+		}
+#endif /* CONFIG_S390_EXEC_PROTECT */
+	case VM_FAULT_BADMAP:
+		/* Bad memory access. Check if it is kernel or user space. */
+		if (regs->psw.mask & PSW_MASK_PSTATE) {
+			/* User mode accesses just cause a SIGSEGV */
+			si_code = (fault == VM_FAULT_BADMAP) ?
+				SEGV_MAPERR : SEGV_ACCERR;
+			do_sigsegv(regs, int_code, si_code, trans_exc_code);
+			return;
+		}
+	case VM_FAULT_BADCONTEXT:
+		do_no_context(regs, int_code, trans_exc_code);
+		break;
+	default: /* fault & VM_FAULT_ERROR */
+		if (fault & VM_FAULT_OOM)
+			pagefault_out_of_memory();
+		else if (fault & VM_FAULT_SIGBUS) {
+			do_sigbus(regs, int_code, trans_exc_code);
+			/* Kernel mode? Handle exceptions or die */
+			if (!(regs->psw.mask & PSW_MASK_PSTATE))
+				do_no_context(regs, int_code, trans_exc_code);
+		} else
+			BUG();
+		break;
+	}
+}
+
 /*
  * This routine handles page faults. It determines the address,
  * and the problem, and then passes it off to one of the appropriate
  * routines.
  *
- * error_code:
+ * interruption code (int_code):
  *   04  Protection           -> Write-Protection  (suprression)
  *   10  Segment translation  -> Not present       (nullification)
  *   11  Page translation     -> Not present       (nullification)
  *   3b  Region third trans.  -> Not present       (nullification)
  */
-static inline void
-do_exception(struct pt_regs *regs, unsigned long error_code, int write)
+static inline int do_exception(struct pt_regs *regs, int access,
+			       unsigned long trans_exc_code)
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	unsigned long address;
-	int space;
-	int si_code;
 	int fault;
 
-	if (notify_page_fault(regs, error_code))
-		return;
+	if (notify_page_fault(regs))
+		return 0;
 
 	tsk = current;
 	mm = tsk->mm;
 
-	/* get the failing address and the affected space */
-	address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
-	space = check_space(tsk);
-
 	/*
 	 * Verify that the fault happened in user space, that
 	 * we are not in an interrupt and that there is a
 	 * user context.
 	 */
-	if (unlikely(space == 0 || in_atomic() || !mm))
-		goto no_context;
+	fault = VM_FAULT_BADCONTEXT;
+	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
+		goto out;
 
+	address = trans_exc_code & __FAIL_ADDR_MASK;
 	/*
 	 * When we get here, the fault happened in the current
 	 * task's user address space, so we can switch on the
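
Review note: do_exception() now reports failure through its return value instead of branching to bad_area/no_context labels, and every entry point feeds a nonzero result to do_fault_error(). A sketch of the resulting calling convention (it mirrors do_protection_exception() further down; the names come from this patch, the wrapper itself is illustrative):

/* Illustrative wrapper, not patch text. */
void example_fault_entry(struct pt_regs *regs, long int_code)
{
	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
	int fault;

	/* 0 means the fault was resolved; anything else is one of the
	 * VM_FAULT_BAD* classes or a handle_mm_fault() error code. */
	fault = do_exception(regs, VM_WRITE, trans_exc_code);
	if (unlikely(fault))
		do_fault_error(regs, int_code, trans_exc_code, fault);
}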
@@ -309,42 +320,26 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 	down_read(&mm->mmap_sem);
 
-	si_code = SEGV_MAPERR;
+	fault = VM_FAULT_BADMAP;
 	vma = find_vma(mm, address);
 	if (!vma)
-		goto bad_area;
-
-#ifdef CONFIG_S390_EXEC_PROTECT
-	if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC)))
-		if (!signal_return(mm, regs, address, error_code))
-			/*
-			 * signal_return() has done an up_read(&mm->mmap_sem)
-			 * if it returns 0.
-			 */
-			return;
-#endif
+		goto out_up;
 
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
-good_area:
-	si_code = SEGV_ACCERR;
-	if (!write) {
-		/* page not present, check vm flags */
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
-			goto bad_area;
-	} else {
-		if (!(vma->vm_flags & VM_WRITE))
-			goto bad_area;
-	}
+	if (unlikely(vma->vm_start > address)) {
+		if (!(vma->vm_flags & VM_GROWSDOWN))
+			goto out_up;
+		if (expand_stack(vma, address))
+			goto out_up;
+	}
 
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it..
+	 */
+	fault = VM_FAULT_BADACCESS;
+	if (unlikely(!(vma->vm_flags & access)))
+		goto out_up;
+
 	if (is_vm_hugetlb_page(vma))
 		address &= HPAGE_MASK;
 	/*
@@ -352,18 +347,11 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(fault & VM_FAULT_ERROR)) {
-		if (fault & VM_FAULT_OOM) {
-			up_read(&mm->mmap_sem);
-			pagefault_out_of_memory();
-			return;
-		} else if (fault & VM_FAULT_SIGBUS) {
-			do_sigbus(regs, error_code, address);
-			return;
-		}
-		BUG();
-	}
+	fault = handle_mm_fault(mm, vma, address,
+				(access == VM_WRITE) ? FAULT_FLAG_WRITE : 0);
+	if (unlikely(fault & VM_FAULT_ERROR))
+		goto out_up;
+
 	if (fault & VM_FAULT_MAJOR) {
 		tsk->maj_flt++;
 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
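
Review note: the old write/read branches collapse into a single vm_flags mask test, and only a pure write request is mapped to FAULT_FLAG_WRITE. The access masks the three callers pass in, collected here for review (illustrative C, not patch text):

/* Access masks passed to do_exception() after this patch: */
int prot_access = VM_WRITE;                     /* do_protection_exception() */
int dat_access  = VM_READ | VM_EXEC | VM_WRITE; /* do_dat_exception()        */
int exec_access = VM_EXEC;                      /* noexec, secondary space   */
/* inside do_exception() the old if/else reduces to:
 *	if (unlikely(!(vma->vm_flags & access)))
 *		goto out_up;		-- reported as VM_FAULT_BADACCESS
 */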
@@ -373,74 +361,69 @@ good_area:
 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
 			      regs, address);
 	}
-	up_read(&mm->mmap_sem);
 	/*
 	 * The instruction that caused the program check will
 	 * be repeated. Don't signal single step via SIGTRAP.
 	 */
 	clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
-	return;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
+	fault = 0;
+out_up:
 	up_read(&mm->mmap_sem);
-
-	/* User mode accesses just cause a SIGSEGV */
-	if (regs->psw.mask & PSW_MASK_PSTATE) {
-		tsk->thread.prot_addr = address;
-		tsk->thread.trap_no = error_code;
-		do_sigsegv(regs, error_code, si_code, address);
-		return;
-	}
-
-no_context:
-	do_no_context(regs, error_code, address);
+out:
+	return fault;
 }
 
-void __kprobes do_protection_exception(struct pt_regs *regs,
-				       long error_code)
+void __kprobes do_protection_exception(struct pt_regs *regs, long int_code)
 {
+	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
+	int fault;
+
 	/* Protection exception is supressing, decrement psw address. */
-	regs->psw.addr -= (error_code >> 16);
+	regs->psw.addr -= (int_code >> 16);
 	/*
 	 * Check for low-address protection. This needs to be treated
 	 * as a special case because the translation exception code
 	 * field is not guaranteed to contain valid data in this case.
 	 */
-	if (unlikely(!(S390_lowcore.trans_exc_code & 4))) {
-		do_low_address(regs, error_code);
+	if (unlikely(!(trans_exc_code & 4))) {
+		do_low_address(regs, int_code, trans_exc_code);
 		return;
 	}
-	do_exception(regs, 4, 1);
+	fault = do_exception(regs, VM_WRITE, trans_exc_code);
+	if (unlikely(fault))
+		do_fault_error(regs, 4, trans_exc_code, fault);
 }
 
-void __kprobes do_dat_exception(struct pt_regs *regs, long error_code)
+void __kprobes do_dat_exception(struct pt_regs *regs, long int_code)
 {
-	do_exception(regs, error_code & 0xff, 0);
+	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
+	int access, fault;
+
+	access = VM_READ | VM_EXEC | VM_WRITE;
+#ifdef CONFIG_S390_EXEC_PROTECT
+	if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
+	    (trans_exc_code & 3) == 0)
+		access = VM_EXEC;
+#endif
+	fault = do_exception(regs, access, trans_exc_code);
+	if (unlikely(fault))
+		do_fault_error(regs, int_code & 255, trans_exc_code, fault);
 }
 
 #ifdef CONFIG_64BIT
-void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_asce_exception(struct pt_regs *regs, long int_code)
 {
-	struct mm_struct *mm;
+	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
+	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long address;
-	int space;
-
-	mm = current->mm;
-	address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
-	space = check_space(current);
 
-	if (unlikely(space == 0 || in_atomic() || !mm))
+	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
 		goto no_context;
 
 	local_irq_enable();
 
 	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, address);
+	vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
 	up_read(&mm->mmap_sem);
 
 	if (vma) {
@@ -450,17 +433,38 @@ void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
 
 	/* User mode accesses just cause a SIGSEGV */
 	if (regs->psw.mask & PSW_MASK_PSTATE) {
-		current->thread.prot_addr = address;
-		current->thread.trap_no = error_code;
-		do_sigsegv(regs, error_code, SEGV_MAPERR, address);
+		do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
 		return;
 	}
 
 no_context:
-	do_no_context(regs, error_code, address);
+	do_no_context(regs, int_code, trans_exc_code);
 }
 #endif
 
+int __handle_fault(unsigned long uaddr, unsigned long int_code, int write_user)
+{
+	struct pt_regs regs;
+	int access, fault;
+
+	regs.psw.mask = psw_kernel_bits;
+	if (!irqs_disabled())
+		regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
+	regs.psw.addr = (unsigned long) __builtin_return_address(0);
+	regs.psw.addr |= PSW_ADDR_AMODE;
+	uaddr &= PAGE_MASK;
+	access = write_user ? VM_WRITE : VM_READ;
+	fault = do_exception(&regs, access, uaddr | 2);
+	if (unlikely(fault)) {
+		if (fault & VM_FAULT_OOM) {
+			pagefault_out_of_memory();
+			fault = 0;
+		} else if (fault & VM_FAULT_SIGBUS)
+			do_sigbus(&regs, int_code, uaddr);
+	}
+	return fault ? -EFAULT : 0;
+}
+
 #ifdef CONFIG_PFAULT
 /*
  * 'pfault' pseudo page faults routines.
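
Review note: __handle_fault() is new; it lets the uaccess fixup code resolve a failing user access through the full fault handler by synthesizing a pt_regs, and the "uaddr | 2" tag makes user_space_fault() classify the access as secondary-space, i.e. user space. A hypothetical caller, purely illustrative — nothing below is part of this patch, and the 0x10 interruption code is an assumption:

/* Hypothetical fixup path: ask the fault handler to make the page
 * present, then let the caller retry the access. */
static int example_uaccess_fixup(void __user *uaddr, int write_user)
{
	if (__handle_fault((unsigned long) uaddr, 0x10, write_user))
		return -EFAULT;	/* fault could not be resolved */
	return 0;		/* resolved, caller may retry the access */
}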
@@ -522,7 +526,7 @@ void pfault_fini(void)
 		: : "a" (&refbk), "m" (refbk) : "cc");
 }
 
-static void pfault_interrupt(__u16 error_code)
+static void pfault_interrupt(__u16 int_code)
 {
 	struct task_struct *tsk;
 	__u16 subcode;