author     Gerald Schaefer <geraldsc@de.ibm.com>        2007-03-05 17:35:54 -0500
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2007-03-05 17:35:54 -0500
commit     482b05dd533da162fa8d04c61712fae297bea3e0 (patch)
tree       d8c93f1557927b7a00ca07c2e07b7de8b0fa7772 /arch/s390/mm/fault.c
parent     046f3e821f146d6c473edb033af15a0604957af8 (diff)
[S390] Fixed handling of access register mode faults.
Replaced check_user_space() + __check_access_register() with the new check_space(). The old functions made wrong assumptions about kernel and user space when the kernel and user address spaces are switched (kernel in home space, user in primary/secondary space).

Secondly, a user process can switch to access register mode if it is running in primary or secondary mode, and it can load arbitrary values into the access registers. If any value other than 0 for primary space or 1 for secondary space is loaded and memory is accessed via the base register related to that access register, the program should be terminated with a SIGSEGV. To achieve that, the DUALD pointer in the DUCT and the PSALD pointer in the PASTE need to point to an array of 8 invalid access-list entries, so that an ALEN-translation exception is raised if an invalid ALET is used.

Signed-off-by: Gerald Schaefer <geraldsc@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
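As an illustration of the classification described above, here is a small user-space model (not the kernel code) of the decision implemented by the new check_space() in the hunk further down. The function classify_fault(), its parameters and the constants are invented for this sketch, and the noexec=on case (return value 2) is omitted for brevity; only the descriptor bits and the ALET handling are modelled.

#include <stdio.h>

/*
 * Illustrative user-space model only -- not the kernel implementation.
 * "desc" models the low two bits of S390_lowcore.trans_exc_code,
 * "alet" the value loaded into the access register used for the access,
 * "switch_amode" the switched address-space layout (kernel in home space),
 * and "user_ar4" models tsk->thread.mm_segment.ar4.
 */
enum fault_space { SPACE_KERNEL = 0, SPACE_USER = 1 };

static int classify_fault(int desc, int alet, int switch_amode, int user_ar4)
{
	if (desc == 3)			/* home segment table descriptor */
		return switch_amode ? SPACE_KERNEL : SPACE_USER;
	if (desc == 2)			/* secondary segment table descriptor */
		return user_ar4;
	if (desc == 1 && alet != 0)	/* determined via access register */
		/*
		 * An ALET of 1 means secondary space.  Any other non-zero
		 * ALET never gets here: the invalid access-list entries set
		 * up by this patch force an ALEN-translation exception.
		 */
		return user_ar4;
	/* primary segment table descriptor, or ALET 0 (primary space) */
	return switch_amode ? SPACE_USER : SPACE_KERNEL;
}

int main(void)
{
	/* Switched mode: home space belongs to the kernel ... */
	printf("home space fault    -> %d\n", classify_fault(3, 0, 1, SPACE_USER));
	/* ... while primary space belongs to the user process. */
	printf("primary space fault -> %d\n", classify_fault(0, 0, 1, SPACE_USER));
	/* Access register mode with ALET 1 -> secondary (user) space. */
	printf("ALET 1 fault        -> %d\n", classify_fault(1, 1, 1, SPACE_USER));
	return 0;
}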
Diffstat (limited to 'arch/s390/mm/fault.c')
-rw-r--r--  arch/s390/mm/fault.c  105
1 file changed, 47 insertions(+), 58 deletions(-)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 641aef36ccc4..7462aebd3eb6 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -108,53 +108,40 @@ void bust_spinlocks(int yes)
 }
 
 /*
- * Check which address space is addressed by the access
- * register in S390_lowcore.exc_access_id.
- * Returns 1 for user space and 0 for kernel space.
+ * Returns the address space associated with the fault.
+ * Returns 0 for kernel space, 1 for user space and
+ * 2 for code execution in user space with noexec=on.
  */
-static int __check_access_register(struct pt_regs *regs, int error_code)
-{
-	int areg = S390_lowcore.exc_access_id;
-
-	if (areg == 0)
-		/* Access via access register 0 -> kernel address */
-		return 0;
-	save_access_regs(current->thread.acrs);
-	if (regs && areg < NUM_ACRS && current->thread.acrs[areg] <= 1)
-		/*
-		 * access register contains 0 -> kernel address,
-		 * access register contains 1 -> user space address
-		 */
-		return current->thread.acrs[areg];
-
-	/* Something unhealthy was done with the access registers... */
-	die("page fault via unknown access register", regs, error_code);
-	do_exit(SIGKILL);
-	return 0;
-}
-
-/*
- * Check which address space the address belongs to.
- * May return 1 or 2 for user space and 0 for kernel space.
- * Returns 2 for user space in primary addressing mode with
- * CONFIG_S390_EXEC_PROTECT on and kernel parameter noexec=on.
- */
-static inline int check_user_space(struct pt_regs *regs, int error_code)
+static inline int check_space(struct task_struct *tsk)
 {
 	/*
-	 * The lowest two bits of S390_lowcore.trans_exc_code indicate
-	 * which paging table was used:
-	 * 0: Primary Segment Table Descriptor
-	 * 1: STD determined via access register
-	 * 2: Secondary Segment Table Descriptor
-	 * 3: Home Segment Table Descriptor
+	 * The lowest two bits of S390_lowcore.trans_exc_code
+	 * indicate which paging table was used.
 	 */
-	int descriptor = S390_lowcore.trans_exc_code & 3;
-	if (unlikely(descriptor == 1))
-		return __check_access_register(regs, error_code);
-	if (descriptor == 2)
-		return current->thread.mm_segment.ar4;
-	return ((descriptor != 0) ^ (switch_amode)) << s390_noexec;
+	int desc = S390_lowcore.trans_exc_code & 3;
+
+	if (desc == 3)	/* Home Segment Table Descriptor */
+		return switch_amode == 0;
+	if (desc == 2)	/* Secondary Segment Table Descriptor */
+		return tsk->thread.mm_segment.ar4;
+#ifdef CONFIG_S390_SWITCH_AMODE
+	if (unlikely(desc == 1)) {	/* STD determined via access register */
+		/* %a0 always indicates primary space. */
+		if (S390_lowcore.exc_access_id != 0) {
+			save_access_regs(tsk->thread.acrs);
+			/*
+			 * An alet of 0 indicates primary space.
+			 * An alet of 1 indicates secondary space.
+			 * Any other alet values generate an
+			 * alen-translation exception.
+			 */
+			if (tsk->thread.acrs[S390_lowcore.exc_access_id])
+				return tsk->thread.mm_segment.ar4;
+		}
+	}
+#endif
+	/* Primary Segment Table Descriptor */
+	return switch_amode << s390_noexec;
 }
 
 /*
@@ -265,16 +252,16 @@ out_fault:
  * 11 Page translation -> Not present (nullification)
  * 3b Region third trans. -> Not present (nullification)
  */
-static inline void __kprobes
+static inline void
 do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	unsigned long address;
-	int user_address;
 	const struct exception_table_entry *fixup;
-	int si_code = SEGV_MAPERR;
+	int si_code;
+	int space;
 
 	tsk = current;
 	mm = tsk->mm;
@@ -294,7 +281,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 	   NULL pointer write access in kernel mode. */
 	if (!(regs->psw.mask & PSW_MASK_PSTATE)) {
 		address = 0;
-		user_address = 0;
+		space = 0;
 		goto no_context;
 	}
 
@@ -309,15 +296,15 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 	 * the address
 	 */
 	address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
-	user_address = check_user_space(regs, error_code);
+	space = check_space(tsk);
 
 	/*
 	 * Verify that the fault happened in user space, that
 	 * we are not in an interrupt and that there is a
 	 * user context.
 	 */
-	if (user_address == 0 || in_atomic() || !mm)
+	if (unlikely(space == 0 || in_atomic() || !mm))
 		goto no_context;
 
 	/*
 	 * When we get here, the fault happened in the current
@@ -328,12 +315,13 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 
 	down_read(&mm->mmap_sem);
 
-	vma = find_vma(mm, address);
-	if (!vma)
-		goto bad_area;
+	si_code = SEGV_MAPERR;
+	vma = find_vma(mm, address);
+	if (!vma)
+		goto bad_area;
 
 #ifdef CONFIG_S390_EXEC_PROTECT
-	if (unlikely((user_address == 2) && !(vma->vm_flags & VM_EXEC)))
+	if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC)))
 		if (!signal_return(mm, regs, address, error_code))
 			/*
 			 * signal_return() has done an up_read(&mm->mmap_sem)
@@ -389,7 +377,7 @@ survive:
 	 * The instruction that caused the program check will
 	 * be repeated. Don't signal single step via SIGTRAP.
 	 */
-	clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+	clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
 	return;
 
 /*
@@ -419,7 +407,7 @@ no_context:
 	 * Oops. The kernel tried to access some bad page. We'll have to
 	 * terminate things with extreme prejudice.
 	 */
-	if (user_address == 0)
+	if (space == 0)
 		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
 		       " at virtual kernel address %p\n", (void *)address);
 	else
@@ -462,13 +450,14 @@ do_sigbus:
 	goto no_context;
 }
 
-void do_protection_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_protection_exception(struct pt_regs *regs,
+				       unsigned long error_code)
 {
 	regs->psw.addr -= (error_code >> 16);
 	do_exception(regs, 4, 1);
 }
 
-void do_dat_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_dat_exception(struct pt_regs *regs, unsigned long error_code)
 {
 	do_exception(regs, error_code & 0xff, 0);
 }