Diffstat (limited to 'arch/s390/mm/fault.c')
-rw-r--r--	arch/s390/mm/fault.c | 379
1 file changed, 191 insertions(+), 188 deletions(-)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 6d507462967a..3040d7c78fe0 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -30,20 +30,20 @@
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/hugetlb.h>
+#include <asm/asm-offsets.h>
 #include <asm/system.h>
 #include <asm/pgtable.h>
 #include <asm/s390_ext.h>
 #include <asm/mmu_context.h>
+#include <asm/compat.h>
 #include "../kernel/entry.h"
 
 #ifndef CONFIG_64BIT
 #define __FAIL_ADDR_MASK	0x7ffff000
-#define __FIXUP_MASK		0x7fffffff
 #define __SUBCODE_MASK		0x0200
 #define __PF_RES_FIELD		0ULL
 #else /* CONFIG_64BIT */
 #define __FAIL_ADDR_MASK	-4096L
-#define __FIXUP_MASK		~0L
 #define __SUBCODE_MASK		0x0600
 #define __PF_RES_FIELD		0x8000000000000000ULL
 #endif /* CONFIG_64BIT */
@@ -52,27 +52,23 @@
 extern int sysctl_userprocess_debug;
 #endif
 
-#ifdef CONFIG_KPROBES
-static inline int notify_page_fault(struct pt_regs *regs, long err)
+#define VM_FAULT_BADCONTEXT	0x010000
+#define VM_FAULT_BADMAP		0x020000
+#define VM_FAULT_BADACCESS	0x040000
+
+static inline int notify_page_fault(struct pt_regs *regs)
 {
 	int ret = 0;
 
 	/* kprobe_running() needs smp_processor_id() */
-	if (!user_mode(regs)) {
+	if (kprobes_built_in() && !user_mode(regs)) {
 		preempt_disable();
 		if (kprobe_running() && kprobe_fault_handler(regs, 14))
 			ret = 1;
 		preempt_enable();
 	}
-
 	return ret;
 }
-#else
-static inline int notify_page_fault(struct pt_regs *regs, long err)
-{
-	return 0;
-}
-#endif
 
 
 /*
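The kprobes_built_in() test in this hunk is what allows the old #ifdef CONFIG_KPROBES / #else stub pair to collapse into a single notify_page_fault(): the helper returns a compile-time constant, so the whole kprobe branch is discarded when kprobes are configured out. A minimal standalone sketch of the pattern follows; CONFIG_FEATURE, feature_built_in() and notify_fault() are illustrative stand-ins, not kernel names.

#include <stdio.h>

/* Stand-in for kprobes_built_in(): a constant-returning inline lets
 * one function definition replace an #ifdef'd pair, because the dead
 * branch is removed at compile time. */
static inline int feature_built_in(void)
{
#ifdef CONFIG_FEATURE
	return 1;
#else
	return 0;
#endif
}

static int notify_fault(void)
{
	int ret = 0;

	if (feature_built_in())	/* folds to "if (0)" when the feature is off */
		ret = 1;	/* the real handler call would go here */
	return ret;
}

int main(void)
{
	printf("handled: %d\n", notify_fault());
	return 0;
}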
@@ -100,57 +96,50 @@ void bust_spinlocks(int yes)
 
 /*
  * Returns the address space associated with the fault.
- * Returns 0 for kernel space, 1 for user space and
- * 2 for code execution in user space with noexec=on.
+ * Returns 0 for kernel space and 1 for user space.
  */
-static inline int check_space(struct task_struct *tsk)
+static inline int user_space_fault(unsigned long trans_exc_code)
 {
 	/*
-	 * The lowest two bits of S390_lowcore.trans_exc_code
-	 * indicate which paging table was used.
+	 * The lowest two bits of the translation exception
+	 * identification indicate which paging table was used.
 	 */
-	int desc = S390_lowcore.trans_exc_code & 3;
-
-	if (desc == 3)	/* Home Segment Table Descriptor */
-		return switch_amode == 0;
-	if (desc == 2)	/* Secondary Segment Table Descriptor */
-		return tsk->thread.mm_segment.ar4;
-#ifdef CONFIG_S390_SWITCH_AMODE
-	if (unlikely(desc == 1)) { /* STD determined via access register */
-		/* %a0 always indicates primary space. */
-		if (S390_lowcore.exc_access_id != 0) {
-			save_access_regs(tsk->thread.acrs);
-			/*
-			 * An alet of 0 indicates primary space.
-			 * An alet of 1 indicates secondary space.
-			 * Any other alet values generate an
-			 * alen-translation exception.
-			 */
-			if (tsk->thread.acrs[S390_lowcore.exc_access_id])
-				return tsk->thread.mm_segment.ar4;
-		}
-	}
-#endif
-	/* Primary Segment Table Descriptor */
-	return switch_amode << s390_noexec;
+	trans_exc_code &= 3;
+	if (trans_exc_code == 2)
+		/* Access via secondary space, set_fs setting decides */
+		return current->thread.mm_segment.ar4;
+	if (user_mode == HOME_SPACE_MODE)
+		/* User space if the access has been done via home space. */
+		return trans_exc_code == 3;
+	/*
+	 * If the user space is not the home space the kernel runs in home
+	 * space. Access via secondary space has already been covered,
+	 * access via primary space or access register is from user space
+	 * and access via home space is from the kernel.
+	 */
+	return trans_exc_code != 3;
 }
 
 /*
  * Send SIGSEGV to task. This is an external routine
  * to keep the stack usage of do_page_fault small.
  */
-static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
-		       int si_code, unsigned long address)
+static noinline void do_sigsegv(struct pt_regs *regs, long int_code,
+				int si_code, unsigned long trans_exc_code)
 {
 	struct siginfo si;
+	unsigned long address;
 
+	address = trans_exc_code & __FAIL_ADDR_MASK;
+	current->thread.prot_addr = address;
+	current->thread.trap_no = int_code;
 #if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
 #if defined(CONFIG_SYSCTL)
 	if (sysctl_userprocess_debug)
 #endif
 	{
 		printk("User process fault: interruption code 0x%lX\n",
-		       error_code);
+		       int_code);
 		printk("failing address: %lX\n", address);
 		show_regs(regs);
 	}
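The rewritten helper above leans on a hardware convention worth spelling out: the two low bits of the s390 translation exception identification name the address space the CPU used for the faulting access (0 primary, 1 access register, 2 secondary, 3 home), which is all user_space_fault() needs to classify a fault. A self-contained sketch of that decoding; the bit-to-space mapping follows the architecture, the rest is made up for illustration.

#include <stdio.h>

/* Decode the two low bits of the translation exception identification,
 * mirroring what user_space_fault() inspects above. */
static const char *fault_space(unsigned long trans_exc_code)
{
	switch (trans_exc_code & 3) {
	case 0: return "primary space";
	case 1: return "access-register mode";
	case 2: return "secondary space (set_fs setting decides)";
	case 3: return "home space";
	}
	return "";	/* unreachable */
}

int main(void)
{
	unsigned long code;

	for (code = 0; code < 4; code++)
		printf("low bits %lu -> %s\n", code, fault_space(code));
	return 0;
}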
@@ -161,13 +150,14 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
 	force_sig_info(SIGSEGV, &si, current);
 }
 
-static void do_no_context(struct pt_regs *regs, unsigned long error_code,
-			  unsigned long address)
+static noinline void do_no_context(struct pt_regs *regs, long int_code,
+				   unsigned long trans_exc_code)
 {
 	const struct exception_table_entry *fixup;
+	unsigned long address;
 
 	/* Are we prepared to handle this kernel fault? */
-	fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
+	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
 	if (fixup) {
 		regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
 		return;
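The switch from __FIXUP_MASK to PSW_ADDR_INSN above is what lets the first hunk delete the __FIXUP_MASK defines: PSW_ADDR_INSN already strips the addressing-mode bit from a 31-bit PSW address and is an all-ones mask on 64-bit, so a private mask was redundant. A small illustration of the 31-bit case; the constants follow the s390 definitions, the sample address is invented.

#include <stdio.h>

/* 31-bit s390: bit 31 of a PSW address is the addressing-mode bit, so
 * masking with PSW_ADDR_INSN recovers the plain instruction address. */
#define PSW_ADDR_INSN	0x7fffffffUL
#define PSW_ADDR_AMODE	0x80000000UL

int main(void)
{
	unsigned long psw_addr = 0x80012344UL;	/* invented example */

	printf("instruction address: %#lx\n", psw_addr & PSW_ADDR_INSN);
	printf("back in PSW format:  %#lx\n",
	       (psw_addr & PSW_ADDR_INSN) | PSW_ADDR_AMODE);
	return 0;
}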
@@ -177,129 +167,149 @@ static void do_no_context(struct pt_regs *regs, unsigned long error_code,
 	 * Oops. The kernel tried to access some bad page. We'll have to
 	 * terminate things with extreme prejudice.
 	 */
-	if (check_space(current) == 0)
+	address = trans_exc_code & __FAIL_ADDR_MASK;
+	if (!user_space_fault(trans_exc_code))
 		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
 		       " at virtual kernel address %p\n", (void *)address);
 	else
 		printk(KERN_ALERT "Unable to handle kernel paging request"
 		       " at virtual user address %p\n", (void *)address);
 
-	die("Oops", regs, error_code);
+	die("Oops", regs, int_code);
 	do_exit(SIGKILL);
 }
 
-static void do_low_address(struct pt_regs *regs, unsigned long error_code)
+static noinline void do_low_address(struct pt_regs *regs, long int_code,
+				    unsigned long trans_exc_code)
 {
 	/* Low-address protection hit in kernel mode means
 	   NULL pointer write access in kernel mode. */
 	if (regs->psw.mask & PSW_MASK_PSTATE) {
 		/* Low-address protection hit in user mode 'cannot happen'. */
-		die ("Low-address protection", regs, error_code);
+		die ("Low-address protection", regs, int_code);
 		do_exit(SIGKILL);
 	}
 
-	do_no_context(regs, error_code, 0);
+	do_no_context(regs, int_code, trans_exc_code);
 }
 
-static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
-		      unsigned long address)
+static noinline void do_sigbus(struct pt_regs *regs, long int_code,
+			       unsigned long trans_exc_code)
 {
 	struct task_struct *tsk = current;
-	struct mm_struct *mm = tsk->mm;
 
-	up_read(&mm->mmap_sem);
 	/*
 	 * Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
 	 */
-	tsk->thread.prot_addr = address;
-	tsk->thread.trap_no = error_code;
+	tsk->thread.prot_addr = trans_exc_code & __FAIL_ADDR_MASK;
+	tsk->thread.trap_no = int_code;
 	force_sig(SIGBUS, tsk);
-
-	/* Kernel mode? Handle exceptions or die */
-	if (!(regs->psw.mask & PSW_MASK_PSTATE))
-		do_no_context(regs, error_code, address);
 }
 
 #ifdef CONFIG_S390_EXEC_PROTECT
-static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
-			 unsigned long address, unsigned long error_code)
+static noinline int signal_return(struct pt_regs *regs, long int_code,
+				  unsigned long trans_exc_code)
 {
 	u16 instruction;
 	int rc;
-#ifdef CONFIG_COMPAT
-	int compat;
-#endif
 
-	pagefault_disable();
 	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
-	pagefault_enable();
-	if (rc)
-		return -EFAULT;
 
-	up_read(&mm->mmap_sem);
-	clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
-#ifdef CONFIG_COMPAT
-	compat = is_compat_task();
-	if (compat && instruction == 0x0a77)
-		sys32_sigreturn();
-	else if (compat && instruction == 0x0aad)
-		sys32_rt_sigreturn();
-	else
-#endif
-	if (instruction == 0x0a77)
-		sys_sigreturn();
-	else if (instruction == 0x0aad)
-		sys_rt_sigreturn();
-	else {
-		current->thread.prot_addr = address;
-		current->thread.trap_no = error_code;
-		do_sigsegv(regs, error_code, SEGV_MAPERR, address);
-	}
+	if (!rc && instruction == 0x0a77) {
+		clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+		if (is_compat_task())
+			sys32_sigreturn();
+		else
+			sys_sigreturn();
+	} else if (!rc && instruction == 0x0aad) {
+		clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+		if (is_compat_task())
+			sys32_rt_sigreturn();
+		else
+			sys_rt_sigreturn();
+	} else
+		do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
 	return 0;
 }
 #endif /* CONFIG_S390_EXEC_PROTECT */
 
+static noinline void do_fault_error(struct pt_regs *regs, long int_code,
+				    unsigned long trans_exc_code, int fault)
+{
+	int si_code;
+
+	switch (fault) {
+	case VM_FAULT_BADACCESS:
+#ifdef CONFIG_S390_EXEC_PROTECT
+		if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
+		    (trans_exc_code & 3) == 0) {
+			signal_return(regs, int_code, trans_exc_code);
+			break;
+		}
+#endif /* CONFIG_S390_EXEC_PROTECT */
+	case VM_FAULT_BADMAP:
+		/* Bad memory access. Check if it is kernel or user space. */
+		if (regs->psw.mask & PSW_MASK_PSTATE) {
+			/* User mode accesses just cause a SIGSEGV */
+			si_code = (fault == VM_FAULT_BADMAP) ?
+				SEGV_MAPERR : SEGV_ACCERR;
+			do_sigsegv(regs, int_code, si_code, trans_exc_code);
+			return;
+		}
+	case VM_FAULT_BADCONTEXT:
+		do_no_context(regs, int_code, trans_exc_code);
+		break;
+	default: /* fault & VM_FAULT_ERROR */
+		if (fault & VM_FAULT_OOM)
+			pagefault_out_of_memory();
+		else if (fault & VM_FAULT_SIGBUS) {
+			do_sigbus(regs, int_code, trans_exc_code);
+			/* Kernel mode? Handle exceptions or die */
+			if (!(regs->psw.mask & PSW_MASK_PSTATE))
+				do_no_context(regs, int_code, trans_exc_code);
+		} else
+			BUG();
+		break;
+	}
+}
+
 /*
  * This routine handles page faults. It determines the address,
  * and the problem, and then passes it off to one of the appropriate
  * routines.
  *
- * error_code:
+ * interruption code (int_code):
  *   04       Protection           ->  Write-Protection  (suppression)
  *   10       Segment translation  ->  Not present       (nullification)
  *   11       Page translation     ->  Not present       (nullification)
  *   3b       Region third trans.  ->  Not present       (nullification)
  */
-static inline void
-do_exception(struct pt_regs *regs, unsigned long error_code, int write)
+static inline int do_exception(struct pt_regs *regs, int access,
+			       unsigned long trans_exc_code)
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	unsigned long address;
-	int space;
-	int si_code;
 	int fault;
 
-	if (notify_page_fault(regs, error_code))
-		return;
+	if (notify_page_fault(regs))
+		return 0;
 
 	tsk = current;
 	mm = tsk->mm;
 
-	/* get the failing address and the affected space */
-	address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
-	space = check_space(tsk);
-
 	/*
 	 * Verify that the fault happened in user space, that
 	 * we are not in an interrupt and that there is a
 	 * user context.
 	 */
-	if (unlikely(space == 0 || in_atomic() || !mm))
-		goto no_context;
+	fault = VM_FAULT_BADCONTEXT;
+	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
+		goto out;
 
+	address = trans_exc_code & __FAIL_ADDR_MASK;
 	/*
 	 * When we get here, the fault happened in the current
 	 * task's user address space, so we can switch on the
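The structural change in this hunk: do_exception() stops signalling from inside and instead returns 0 on success, one of the three pseudo fault codes defined earlier (parked in bits above the generic VM_FAULT_* flags so both fit in one int), or a real VM_FAULT_* error from handle_mm_fault(); do_fault_error() then decodes the result. A compilable toy model of that convention; the two low-bit values are illustrative, only the three pseudo codes match the patch.

#include <stdio.h>

#define VM_FAULT_OOM		0x0001		/* illustrative low bits */
#define VM_FAULT_SIGBUS		0x0002
#define VM_FAULT_BADCONTEXT	0x010000	/* pseudo codes from the patch */
#define VM_FAULT_BADMAP		0x020000
#define VM_FAULT_BADACCESS	0x040000

/* Toy dispatcher with the same shape as do_fault_error() above. */
static const char *fault_error(int fault)
{
	if (fault & VM_FAULT_BADACCESS)
		return "SIGSEGV (SEGV_ACCERR), or kernel fixup";
	if (fault & VM_FAULT_BADMAP)
		return "SIGSEGV (SEGV_MAPERR), or kernel fixup";
	if (fault & VM_FAULT_BADCONTEXT)
		return "no user context: exception fixup or oops";
	if (fault & VM_FAULT_OOM)
		return "pagefault_out_of_memory()";
	if (fault & VM_FAULT_SIGBUS)
		return "SIGBUS";
	return "BUG()";
}

int main(void)
{
	printf("%s\n", fault_error(VM_FAULT_BADMAP));
	printf("%s\n", fault_error(VM_FAULT_SIGBUS));
	return 0;
}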
@@ -309,42 +319,26 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 	down_read(&mm->mmap_sem);
 
-	si_code = SEGV_MAPERR;
+	fault = VM_FAULT_BADMAP;
 	vma = find_vma(mm, address);
 	if (!vma)
-		goto bad_area;
+		goto out_up;
 
-#ifdef CONFIG_S390_EXEC_PROTECT
-	if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC)))
-		if (!signal_return(mm, regs, address, error_code))
-			/*
-			 * signal_return() has done an up_read(&mm->mmap_sem)
-			 * if it returns 0.
-			 */
-			return;
-#endif
-
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
-good_area:
-	si_code = SEGV_ACCERR;
-	if (!write) {
-		/* page not present, check vm flags */
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
-			goto bad_area;
-	} else {
-		if (!(vma->vm_flags & VM_WRITE))
-			goto bad_area;
-	}
+	if (unlikely(vma->vm_start > address)) {
+		if (!(vma->vm_flags & VM_GROWSDOWN))
+			goto out_up;
+		if (expand_stack(vma, address))
+			goto out_up;
+	}
 
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it..
+	 */
+	fault = VM_FAULT_BADACCESS;
+	if (unlikely(!(vma->vm_flags & access)))
+		goto out_up;
+
 	if (is_vm_hugetlb_page(vma))
 		address &= HPAGE_MASK;
 	/*
@@ -352,18 +346,11 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(fault & VM_FAULT_ERROR)) {
-		if (fault & VM_FAULT_OOM) {
-			up_read(&mm->mmap_sem);
-			pagefault_out_of_memory();
-			return;
-		} else if (fault & VM_FAULT_SIGBUS) {
-			do_sigbus(regs, error_code, address);
-			return;
-		}
-		BUG();
-	}
+	fault = handle_mm_fault(mm, vma, address,
+				(access == VM_WRITE) ? FAULT_FLAG_WRITE : 0);
+	if (unlikely(fault & VM_FAULT_ERROR))
+		goto out_up;
+
 	if (fault & VM_FAULT_MAJOR) {
 		tsk->maj_flt++;
 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
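With an access mask in hand, the old read/write special cases in this and the previous hunk reduce to one test: the caller states the rights the access needs as VM_* bits and the handler checks vma->vm_flags against them. A standalone sketch; the VM_* values mirror the kernel's, the vma flags are faked.

#include <stdio.h>

#define VM_READ		0x0001	/* values mirror the kernel's VM_* flags */
#define VM_WRITE	0x0002
#define VM_EXEC		0x0004

/* One test replaces the old read/write branches: the access is bad
 * iff the mapping grants none of the rights the fault requires. */
static int bad_access(unsigned long vm_flags, int access)
{
	return !(vm_flags & access);
}

int main(void)
{
	/* write fault on a read-only mapping: bad access (prints 1) */
	printf("%d\n", bad_access(VM_READ, VM_WRITE));
	/* read fault on a read+exec mapping: fine (prints 0) */
	printf("%d\n", bad_access(VM_READ | VM_EXEC, VM_READ));
	return 0;
}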
@@ -373,74 +360,69 @@ good_area:
 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
 			      regs, address);
 	}
-	up_read(&mm->mmap_sem);
 	/*
 	 * The instruction that caused the program check will
 	 * be repeated. Don't signal single step via SIGTRAP.
 	 */
 	clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
-	return;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
+	fault = 0;
+out_up:
 	up_read(&mm->mmap_sem);
-
-	/* User mode accesses just cause a SIGSEGV */
-	if (regs->psw.mask & PSW_MASK_PSTATE) {
-		tsk->thread.prot_addr = address;
-		tsk->thread.trap_no = error_code;
-		do_sigsegv(regs, error_code, si_code, address);
-		return;
-	}
-
-no_context:
-	do_no_context(regs, error_code, address);
+out:
+	return fault;
 }
 
-void __kprobes do_protection_exception(struct pt_regs *regs,
-				       long error_code)
+void __kprobes do_protection_exception(struct pt_regs *regs, long int_code)
 {
+	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
+	int fault;
+
 	/* Protection exception is suppressing, decrement psw address. */
-	regs->psw.addr -= (error_code >> 16);
+	regs->psw.addr -= (int_code >> 16);
 	/*
 	 * Check for low-address protection. This needs to be treated
 	 * as a special case because the translation exception code
 	 * field is not guaranteed to contain valid data in this case.
 	 */
-	if (unlikely(!(S390_lowcore.trans_exc_code & 4))) {
-		do_low_address(regs, error_code);
+	if (unlikely(!(trans_exc_code & 4))) {
+		do_low_address(regs, int_code, trans_exc_code);
 		return;
 	}
-	do_exception(regs, 4, 1);
+	fault = do_exception(regs, VM_WRITE, trans_exc_code);
+	if (unlikely(fault))
+		do_fault_error(regs, 4, trans_exc_code, fault);
 }
 
-void __kprobes do_dat_exception(struct pt_regs *regs, long error_code)
+void __kprobes do_dat_exception(struct pt_regs *regs, long int_code)
 {
-	do_exception(regs, error_code & 0xff, 0);
+	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
+	int access, fault;
+
+	access = VM_READ | VM_EXEC | VM_WRITE;
+#ifdef CONFIG_S390_EXEC_PROTECT
+	if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
+	    (trans_exc_code & 3) == 0)
+		access = VM_EXEC;
+#endif
+	fault = do_exception(regs, access, trans_exc_code);
+	if (unlikely(fault))
+		do_fault_error(regs, int_code & 255, trans_exc_code, fault);
 }
 
 #ifdef CONFIG_64BIT
-void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_asce_exception(struct pt_regs *regs, long int_code)
 {
-	struct mm_struct *mm;
+	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
+	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long address;
-	int space;
-
-	mm = current->mm;
-	address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
-	space = check_space(current);
 
-	if (unlikely(space == 0 || in_atomic() || !mm))
+	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
 		goto no_context;
 
 	local_irq_enable();
 
 	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, address);
+	vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
 	up_read(&mm->mmap_sem);
 
 	if (vma) {
@@ -450,17 +432,38 @@ void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
 
 	/* User mode accesses just cause a SIGSEGV */
 	if (regs->psw.mask & PSW_MASK_PSTATE) {
-		current->thread.prot_addr = address;
-		current->thread.trap_no = error_code;
-		do_sigsegv(regs, error_code, SEGV_MAPERR, address);
+		do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
 		return;
 	}
 
 no_context:
-	do_no_context(regs, error_code, address);
+	do_no_context(regs, int_code, trans_exc_code);
 }
 #endif
 
+int __handle_fault(unsigned long uaddr, unsigned long int_code, int write_user)
+{
+	struct pt_regs regs;
+	int access, fault;
+
+	regs.psw.mask = psw_kernel_bits;
+	if (!irqs_disabled())
+		regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
+	regs.psw.addr = (unsigned long) __builtin_return_address(0);
+	regs.psw.addr |= PSW_ADDR_AMODE;
+	uaddr &= PAGE_MASK;
+	access = write_user ? VM_WRITE : VM_READ;
+	fault = do_exception(&regs, access, uaddr | 2);
+	if (unlikely(fault)) {
+		if (fault & VM_FAULT_OOM) {
+			pagefault_out_of_memory();
+			fault = 0;
+		} else if (fault & VM_FAULT_SIGBUS)
+			do_sigbus(&regs, int_code, uaddr);
+	}
+	return fault ? -EFAULT : 0;
+}
+
 #ifdef CONFIG_PFAULT
 /*
  * 'pfault' pseudo page faults routines.
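The new __handle_fault() in the hunk above reuses do_exception() for faults taken during kernel accesses to user memory: it fabricates a pt_regs and passes uaddr | 2 as the translation exception code, the page-aligned user address plus low bits forced to 2 (secondary space), so user_space_fault() classifies the access as a user-space one. A tiny sketch of just that encoding, assuming 4K pages; names are illustrative.

#include <stdio.h>

#define PAGE_MASK	(~0xfffUL)	/* 4K pages assumed */

/* Mirror of the "uaddr | 2" trick in __handle_fault(): page-aligned
 * address in the high bits, "secondary space" (2) in the low bits. */
static unsigned long fake_trans_exc_code(unsigned long uaddr)
{
	return (uaddr & PAGE_MASK) | 2;
}

int main(void)
{
	unsigned long code = fake_trans_exc_code(0x20008123UL);

	printf("trans_exc_code=%#lx space-bits=%lu\n", code, code & 3);
	return 0;
}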
@@ -522,7 +525,7 @@ void pfault_fini(void)
 		: : "a" (&refbk), "m" (refbk) : "cc");
 }
 
-static void pfault_interrupt(__u16 error_code)
+static void pfault_interrupt(__u16 int_code)
 {
 	struct task_struct *tsk;
 	__u16 subcode;