Diffstat (limited to 'arch/sh/kernel/traps_32.c')
-rw-r--r--   arch/sh/kernel/traps_32.c   164
1 file changed, 76 insertions(+), 88 deletions(-)
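
This patch reworks the 32-bit SH unaligned-access fixup path: the open-coded sign-extension blocks are folded into a single sign_extend() helper, the raw copy_from_user()/copy_to_user() calls are routed through a struct mem_access accessor pair (user_mem_access supplies the userspace pair), handle_unaligned_access() loses its static qualifier and gains a struct mem_access * argument, the opcode type changes from u16 to opcode_t with instruction_size() used to step over variable-length opcodes, and the CONFIG_CPU_SH2A ifdefs around the handler are dropped. The definition of struct mem_access itself lives in a header and is not part of this diff; a minimal sketch of what the user_mem_access initializer below implies, assuming copy_{from,to}_user()-compatible signatures, would be:

struct mem_access {
        unsigned long (*from)(void *dst, const void __user *src, unsigned long cnt);
        unsigned long (*to)(void __user *dst, const void *src, unsigned long cnt);
};
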
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 2e58f7a6b746..baa4fa368dce 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -147,6 +147,36 @@ static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
 	return -EFAULT;
 }
 
+static inline void sign_extend(unsigned int count, unsigned char *dst)
+{
+#ifdef __LITTLE_ENDIAN__
+	if ((count == 1) && dst[0] & 0x80) {
+		dst[1] = 0xff;
+		dst[2] = 0xff;
+		dst[3] = 0xff;
+	}
+	if ((count == 2) && dst[1] & 0x80) {
+		dst[2] = 0xff;
+		dst[3] = 0xff;
+	}
+#else
+	if ((count == 1) && dst[3] & 0x80) {
+		dst[2] = 0xff;
+		dst[1] = 0xff;
+		dst[0] = 0xff;
+	}
+	if ((count == 2) && dst[2] & 0x80) {
+		dst[1] = 0xff;
+		dst[0] = 0xff;
+	}
+#endif
+}
+
+static struct mem_access user_mem_access = {
+	copy_from_user,
+	copy_to_user,
+};
+
 /*
  * handle an instruction that does an unaligned memory access by emulating the
  * desired behaviour
@@ -154,7 +184,8 @@ static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
  * (if that instruction is in a branch delay slot)
  * - return 0 if emulation okay, -EFAULT on existential error
  */
-static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
+static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
+				struct mem_access *ma)
 {
 	int ret, index, count;
 	unsigned long *rm, *rn;
@@ -178,25 +209,13 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
 			dst = (unsigned char*) rn;
 			*(unsigned long*)dst = 0;
 
-#ifdef __LITTLE_ENDIAN__
-			if (copy_from_user(dst, src, count))
-				goto fetch_fault;
-
-			if ((count == 2) && dst[1] & 0x80) {
-				dst[2] = 0xff;
-				dst[3] = 0xff;
-			}
-#else
+#if !defined(__LITTLE_ENDIAN__)
 			dst += 4-count;
-
-			if (__copy_user(dst, src, count))
+#endif
+			if (ma->from(dst, src, count))
 				goto fetch_fault;
 
-			if ((count == 2) && dst[2] & 0x80) {
-				dst[0] = 0xff;
-				dst[1] = 0xff;
-			}
-#endif
+			sign_extend(count, dst);
 		} else {
 			/* to memory */
 			src = (unsigned char*) rm;
@@ -206,7 +225,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
 			dst = (unsigned char*) *rn;
 			dst += regs->regs[0];
 
-			if (copy_to_user(dst, src, count))
+			if (ma->to(dst, src, count))
 				goto fetch_fault;
 		}
 		ret = 0;
@@ -217,7 +236,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
 		dst = (unsigned char*) *rn;
 		dst += (instruction&0x000F)<<2;
 
-		if (copy_to_user(dst,src,4))
+		if (ma->to(dst, src, 4))
 			goto fetch_fault;
 		ret = 0;
 		break;
@@ -230,7 +249,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
 #if !defined(__LITTLE_ENDIAN__)
 		src += 4-count;
 #endif
-		if (copy_to_user(dst, src, count))
+		if (ma->to(dst, src, count))
 			goto fetch_fault;
 		ret = 0;
 		break;
@@ -241,7 +260,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
 		dst = (unsigned char*) rn;
 		*(unsigned long*)dst = 0;
 
-		if (copy_from_user(dst,src,4))
+		if (ma->from(dst, src, 4))
 			goto fetch_fault;
 		ret = 0;
 		break;
@@ -253,25 +272,12 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
 		dst = (unsigned char*) rn;
 		*(unsigned long*)dst = 0;
 
-#ifdef __LITTLE_ENDIAN__
-		if (copy_from_user(dst, src, count))
-			goto fetch_fault;
-
-		if ((count == 2) && dst[1] & 0x80) {
-			dst[2] = 0xff;
-			dst[3] = 0xff;
-		}
-#else
+#if !defined(__LITTLE_ENDIAN__)
 		dst += 4-count;
-
-		if (copy_from_user(dst, src, count))
-			goto fetch_fault;
-
-		if ((count == 2) && dst[2] & 0x80) {
-			dst[0] = 0xff;
-			dst[1] = 0xff;
-		}
-#endif
+#endif
+		if (ma->from(dst, src, count))
+			goto fetch_fault;
+		sign_extend(count, dst);
 		ret = 0;
 		break;
 
@@ -285,7 +291,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
 		dst = (unsigned char*) *rm; /* called Rn in the spec */
 		dst += (instruction&0x000F)<<1;
 
-		if (copy_to_user(dst, src, 2))
+		if (ma->to(dst, src, 2))
 			goto fetch_fault;
 		ret = 0;
 		break;
@@ -299,21 +305,9 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
 #if !defined(__LITTLE_ENDIAN__)
 		dst += 2;
 #endif
-
-		if (copy_from_user(dst, src, 2))
+		if (ma->from(dst, src, 2))
 			goto fetch_fault;
-
-#ifdef __LITTLE_ENDIAN__
-		if (dst[1] & 0x80) {
-			dst[2] = 0xff;
-			dst[3] = 0xff;
-		}
-#else
-		if (dst[2] & 0x80) {
-			dst[0] = 0xff;
-			dst[1] = 0xff;
-		}
-#endif
+		sign_extend(2, dst);
 		ret = 0;
 		break;
 	}
@@ -332,11 +326,14 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
  * emulate the instruction in the delay slot
  * - fetches the instruction from PC+2
  */
-static inline int handle_unaligned_delayslot(struct pt_regs *regs)
+static inline int handle_delayslot(struct pt_regs *regs,
+				   opcode_t old_instruction,
+				   struct mem_access *ma)
 {
-	u16 instruction;
+	opcode_t instruction;
+	void *addr = (void *)(regs->pc + instruction_size(old_instruction));
 
-	if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
+	if (copy_from_user(&instruction, addr, sizeof(instruction))) {
 		/* the instruction-fetch faulted */
 		if (user_mode(regs))
 			return -EFAULT;
@@ -346,7 +343,7 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs)
 		    regs, 0);
 	}
 
-	return handle_unaligned_ins(instruction,regs);
+	return handle_unaligned_ins(instruction, regs, ma);
 }
 
 /*
@@ -369,10 +366,11 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs)
  * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
  * opcodes..
  */
-#ifndef CONFIG_CPU_SH2A
+
 static int handle_unaligned_notify_count = 10;
 
-static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
+int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
+			    struct mem_access *ma)
 {
 	u_int rm;
 	int ret, index;
@@ -387,7 +385,7 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
 		printk(KERN_NOTICE "Fixing up unaligned userspace access "
 		       "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
 		       current->comm, task_pid_nr(current),
-		       (u16 *)regs->pc, instruction);
+		       (void *)regs->pc, instruction);
 	}
 
 	ret = -EFAULT;
@@ -395,19 +393,19 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
 	case 0x0000:
 		if (instruction==0x000B) {
 			/* rts */
-			ret = handle_unaligned_delayslot(regs);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0)
 				regs->pc = regs->pr;
 		}
 		else if ((instruction&0x00FF)==0x0023) {
 			/* braf @Rm */
-			ret = handle_unaligned_delayslot(regs);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0)
 				regs->pc += rm + 4;
 		}
 		else if ((instruction&0x00FF)==0x0003) {
 			/* bsrf @Rm */
-			ret = handle_unaligned_delayslot(regs);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0) {
 				regs->pr = regs->pc + 4;
 				regs->pc += rm + 4;
@@ -428,13 +426,13 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
 	case 0x4000:
 		if ((instruction&0x00FF)==0x002B) {
 			/* jmp @Rm */
-			ret = handle_unaligned_delayslot(regs);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0)
 				regs->pc = rm;
 		}
 		else if ((instruction&0x00FF)==0x000B) {
 			/* jsr @Rm */
-			ret = handle_unaligned_delayslot(regs);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0) {
 				regs->pr = regs->pc + 4;
 				regs->pc = rm;
@@ -461,7 +459,7 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
 		case 0x0B00: /* bf   lab - no delayslot*/
 			break;
 		case 0x0F00: /* bf/s lab */
-			ret = handle_unaligned_delayslot(regs);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0) {
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
 				if ((regs->sr & 0x00000001) != 0)
@@ -474,7 +472,7 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
 		case 0x0900: /* bt   lab - no delayslot */
 			break;
 		case 0x0D00: /* bt/s lab */
-			ret = handle_unaligned_delayslot(regs);
+			ret = handle_delayslot(regs, instruction, ma);
 			if (ret==0) {
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
 				if ((regs->sr & 0x00000001) == 0)
@@ -488,13 +486,13 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
 		break;
 
 	case 0xA000: /* bra label */
-		ret = handle_unaligned_delayslot(regs);
+		ret = handle_delayslot(regs, instruction, ma);
 		if (ret==0)
 			regs->pc += SH_PC_12BIT_OFFSET(instruction);
 		break;
 
 	case 0xB000: /* bsr label */
-		ret = handle_unaligned_delayslot(regs);
+		ret = handle_delayslot(regs, instruction, ma);
 		if (ret==0) {
 			regs->pr = regs->pc + 4;
 			regs->pc += SH_PC_12BIT_OFFSET(instruction);
@@ -505,12 +503,11 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
 
 	/* handle non-delay-slot instruction */
  simple:
-	ret = handle_unaligned_ins(instruction,regs);
+	ret = handle_unaligned_ins(instruction, regs, ma);
 	if (ret==0)
 		regs->pc += instruction_size(instruction);
 	return ret;
 }
-#endif /* CONFIG_CPU_SH2A */
 
 #ifdef CONFIG_CPU_HAS_SR_RB
 #define lookup_exception_vector(x)	\
@@ -538,10 +535,8 @@ asmlinkage void do_address_error(struct pt_regs *regs,
 	unsigned long error_code = 0;
 	mm_segment_t oldfs;
 	siginfo_t info;
-#ifndef CONFIG_CPU_SH2A
-	u16 instruction;
+	opcode_t instruction;
 	int tmp;
-#endif
 
 	/* Intentional ifdef */
 #ifdef CONFIG_CPU_HAS_SR_RB
@@ -561,9 +556,9 @@ asmlinkage void do_address_error(struct pt_regs *regs,
 			goto uspace_segv;
 		}
 
-#ifndef CONFIG_CPU_SH2A
 		set_fs(USER_DS);
-		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
+		if (copy_from_user(&instruction, (void *)(regs->pc),
+				   sizeof(instruction))) {
 			/* Argh. Fault on the instruction itself.
 			   This should never happen non-SMP
 			*/
@@ -571,13 +566,12 @@ asmlinkage void do_address_error(struct pt_regs *regs,
 			goto uspace_segv;
 		}
 
-		tmp = handle_unaligned_access(instruction, regs);
+		tmp = handle_unaligned_access(instruction, regs,
+					      &user_mem_access);
 		set_fs(oldfs);
 
 		if (tmp==0)
 			return; /* sorted */
-#endif
-
 uspace_segv:
 	printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
 	       "access (PC %lx PR %lx)\n", current->comm, regs->pc,
@@ -592,9 +586,9 @@ uspace_segv:
 		if (regs->pc & 1)
 			die("unaligned program counter", regs, error_code);
 
-#ifndef CONFIG_CPU_SH2A
 		set_fs(KERNEL_DS);
-		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
+		if (copy_from_user(&instruction, (void *)(regs->pc),
+				   sizeof(instruction))) {
 			/* Argh. Fault on the instruction itself.
 			   This should never happen non-SMP
 			*/
@@ -602,14 +596,8 @@ uspace_segv:
 			die("insn faulting in do_address_error", regs, 0);
 		}
 
-		handle_unaligned_access(instruction, regs);
+		handle_unaligned_access(instruction, regs, &user_mem_access);
 		set_fs(oldfs);
-#else
-		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
-		       "access\n", current->comm);
-
-		force_sig(SIGSEGV, current);
-#endif
 	}
 }
 
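With handle_unaligned_access() exported and parameterized on struct mem_access, a caller that already operates on kernel-mapped addresses could in principle supply its own accessor pair instead of user_mem_access. A hypothetical sketch, not part of this patch (kernel_copy_wrapper and kernel_mem_access are illustrative names, and the __user annotations are glossed over):

/* Copy directly with memcpy() and mimic the copy_{from,to}_user()
 * convention of returning 0 on success.
 */
static unsigned long kernel_copy_wrapper(void *dst, const void *src,
					 unsigned long cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}

static struct mem_access kernel_mem_access = {
	kernel_copy_wrapper,
	kernel_copy_wrapper,
};

/* ... then, from an in-kernel fixup path: */
ret = handle_unaligned_access(instruction, regs, &kernel_mem_access);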