author     Martin Schwidefsky <schwidefsky@de.ibm.com>    2009-12-07 06:51:42 -0500
committer  Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>    2009-12-07 06:51:33 -0500
commit     61365e132ef987f7719af5d2e434db4465957637
tree       4814cc93c782f933e52cf8e7f5e8549f86358d3b /arch/s390/mm/fault.c
parent     d40f7b75a23d1e59b6ec9d6701231fd4c6992ac6
[S390] Improve address space check.
A data access in access-register mode is always a user mode access, so the code to inspect the access registers can be removed. The second change is to use a different test to check for a no-execute fault. The third change is to pass the translation exception identification as a parameter; in theory the trans_exc_code in the lowcore could have been overwritten by the time check_space is called from do_no_context.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
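To make the new address space decision easier to follow, here is a minimal, self-contained user-space sketch of the test the new user_space_fault() helper performs. It is illustrative only: switch_amode and the set_fs()-controlled secondary-space flag are passed in as plain parameters (in the kernel they come from a global and from current->thread.mm_segment.ar4), and the names user_space_fault_sketch and secondary_is_user are made up for this example.

#include <stdio.h>

/* Returns 0 for a kernel space fault, 1 for a user space fault. */
static int user_space_fault_sketch(unsigned long trans_exc_code,
                                   int switch_amode, int secondary_is_user)
{
	/* The lowest two bits of the translation exception
	   identification name the paging table that was used. */
	trans_exc_code &= 3;
	if (trans_exc_code == 2)
		/* Access via secondary space: the set_fs() setting decides. */
		return secondary_is_user;
	if (!switch_amode)
		/* Without switch_amode a home space access comes from user space. */
		return trans_exc_code == 3;
	/* With switch_amode the kernel runs in home space: primary space and
	   access-register accesses are user space, home space is kernel. */
	return trans_exc_code != 3;
}

int main(void)
{
	/* With switch_amode on, a primary space access (code 0) is a user fault... */
	printf("%d\n", user_space_fault_sketch(0, 1, 0));	/* prints 1 */
	/* ...while a home space access (code 3) comes from the kernel. */
	printf("%d\n", user_space_fault_sketch(3, 1, 0));	/* prints 0 */
	return 0;
}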
Diffstat (limited to 'arch/s390/mm/fault.c')
-rw-r--r--  arch/s390/mm/fault.c | 99
1 file changed, 45 insertions(+), 54 deletions(-)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 6d507462967a..3df5b918cfe2 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -100,39 +100,28 @@ void bust_spinlocks(int yes)
 
 /*
  * Returns the address space associated with the fault.
- * Returns 0 for kernel space, 1 for user space and
- * 2 for code execution in user space with noexec=on.
+ * Returns 0 for kernel space and 1 for user space.
  */
-static inline int check_space(struct task_struct *tsk)
+static inline int user_space_fault(unsigned long trans_exc_code)
 {
 	/*
-	 * The lowest two bits of S390_lowcore.trans_exc_code
-	 * indicate which paging table was used.
+	 * The lowest two bits of the translation exception
+	 * identification indicate which paging table was used.
 	 */
-	int desc = S390_lowcore.trans_exc_code & 3;
-
-	if (desc == 3)	/* Home Segment Table Descriptor */
-		return switch_amode == 0;
-	if (desc == 2)	/* Secondary Segment Table Descriptor */
-		return tsk->thread.mm_segment.ar4;
-#ifdef CONFIG_S390_SWITCH_AMODE
-	if (unlikely(desc == 1)) { /* STD determined via access register */
-		/* %a0 always indicates primary space. */
-		if (S390_lowcore.exc_access_id != 0) {
-			save_access_regs(tsk->thread.acrs);
-			/*
-			 * An alet of 0 indicates primary space.
-			 * An alet of 1 indicates secondary space.
-			 * Any other alet values generate an
-			 * alen-translation exception.
-			 */
-			if (tsk->thread.acrs[S390_lowcore.exc_access_id])
-				return tsk->thread.mm_segment.ar4;
-		}
-	}
-#endif
-	/* Primary Segment Table Descriptor */
-	return switch_amode << s390_noexec;
+	trans_exc_code &= 3;
+	if (trans_exc_code == 2)
+		/* Access via secondary space, set_fs setting decides */
+		return current->thread.mm_segment.ar4;
+	if (!switch_amode)
+		/* User space if the access has been done via home space. */
+		return trans_exc_code == 3;
+	/*
+	 * If the user space is not the home space the kernel runs in home
+	 * space. Access via secondary space has already been covered,
+	 * access via primary space or access register is from user space
+	 * and access via home space is from the kernel.
+	 */
+	return trans_exc_code != 3;
 }
 
 /*
@@ -162,9 +151,10 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
 }
 
 static void do_no_context(struct pt_regs *regs, unsigned long error_code,
-			  unsigned long address)
+			  unsigned long trans_exc_code)
 {
 	const struct exception_table_entry *fixup;
+	unsigned long address;
 
 	/* Are we prepared to handle this kernel fault? */
 	fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
@@ -177,7 +167,8 @@ static void do_no_context(struct pt_regs *regs, unsigned long error_code,
 	 * Oops. The kernel tried to access some bad page. We'll have to
 	 * terminate things with extreme prejudice.
 	 */
-	if (check_space(current) == 0)
+	address = trans_exc_code & __FAIL_ADDR_MASK;
+	if (user_space_fault(trans_exc_code) == 0)
 		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
 		       " at virtual kernel address %p\n", (void *)address);
 	else
@@ -188,7 +179,8 @@ static void do_no_context(struct pt_regs *regs, unsigned long error_code,
 	do_exit(SIGKILL);
 }
 
-static void do_low_address(struct pt_regs *regs, unsigned long error_code)
+static void do_low_address(struct pt_regs *regs, unsigned long error_code,
+			   unsigned long trans_exc_code)
 {
 	/* Low-address protection hit in kernel mode means
 	   NULL pointer write access in kernel mode. */
@@ -198,11 +190,11 @@ static void do_low_address(struct pt_regs *regs, unsigned long error_code)
 		do_exit(SIGKILL);
 	}
 
-	do_no_context(regs, error_code, 0);
+	do_no_context(regs, error_code, trans_exc_code);
 }
 
 static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
-		      unsigned long address)
+		      unsigned long trans_exc_code)
 {
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
@@ -212,13 +204,13 @@ static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
 	 * Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
 	 */
-	tsk->thread.prot_addr = address;
+	tsk->thread.prot_addr = trans_exc_code & __FAIL_ADDR_MASK;
 	tsk->thread.trap_no = error_code;
 	force_sig(SIGBUS, tsk);
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!(regs->psw.mask & PSW_MASK_PSTATE))
-		do_no_context(regs, error_code, address);
+		do_no_context(regs, error_code, trans_exc_code);
 }
 
 #ifdef CONFIG_S390_EXEC_PROTECT
@@ -272,13 +264,13 @@ static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
  *   3b       Region third trans.  ->  Not present       (nullification)
  */
 static inline void
-do_exception(struct pt_regs *regs, unsigned long error_code, int write)
+do_exception(struct pt_regs *regs, unsigned long error_code, int write,
+	     unsigned long trans_exc_code)
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	unsigned long address;
-	int space;
 	int si_code;
 	int fault;
 
@@ -288,18 +280,15 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
 	tsk = current;
 	mm = tsk->mm;
 
-	/* get the failing address and the affected space */
-	address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
-	space = check_space(tsk);
-
 	/*
 	 * Verify that the fault happened in user space, that
 	 * we are not in an interrupt and that there is a
 	 * user context.
 	 */
-	if (unlikely(space == 0 || in_atomic() || !mm))
+	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
 		goto no_context;
 
+	address = trans_exc_code & __FAIL_ADDR_MASK;
 	/*
 	 * When we get here, the fault happened in the current
 	 * task's user address space, so we can switch on the
@@ -315,7 +304,8 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
 		goto bad_area;
 
 #ifdef CONFIG_S390_EXEC_PROTECT
-	if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC)))
+	if (unlikely((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
+		     (trans_exc_code & 3) == 0 && !(vma->vm_flags & VM_EXEC)))
 		if (!signal_return(mm, regs, address, error_code))
 			/*
 			 * signal_return() has done an up_read(&mm->mmap_sem)
@@ -397,12 +387,14 @@ bad_area:
 	}
 
 no_context:
-	do_no_context(regs, error_code, address);
+	do_no_context(regs, error_code, trans_exc_code);
 }
 
 void __kprobes do_protection_exception(struct pt_regs *regs,
 				       long error_code)
 {
+	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
+
 	/* Protection exception is supressing, decrement psw address. */
 	regs->psw.addr -= (error_code >> 16);
 	/*
@@ -410,31 +402,30 @@ void __kprobes do_protection_exception(struct pt_regs *regs,
 	 * as a special case because the translation exception code
 	 * field is not guaranteed to contain valid data in this case.
 	 */
-	if (unlikely(!(S390_lowcore.trans_exc_code & 4))) {
-		do_low_address(regs, error_code);
+	if (unlikely(!(trans_exc_code & 4))) {
+		do_low_address(regs, error_code, trans_exc_code);
 		return;
 	}
-	do_exception(regs, 4, 1);
+	do_exception(regs, 4, 1, trans_exc_code);
 }
 
 void __kprobes do_dat_exception(struct pt_regs *regs, long error_code)
 {
-	do_exception(regs, error_code & 0xff, 0);
+	do_exception(regs, error_code & 0xff, 0, S390_lowcore.trans_exc_code);
 }
 
 #ifdef CONFIG_64BIT
 void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
 {
+	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	unsigned long address;
-	int space;
 
 	mm = current->mm;
-	address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
-	space = check_space(current);
+	address = trans_exc_code & __FAIL_ADDR_MASK;
 
-	if (unlikely(space == 0 || in_atomic() || !mm))
+	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
 		goto no_context;
 
 	local_irq_enable();
@@ -457,7 +448,7 @@ void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
 	}
 
 no_context:
-	do_no_context(regs, error_code, address);
+	do_no_context(regs, error_code, trans_exc_code);
 }
 #endif
 