Diffstat (limited to 'arch')

-rw-r--r--  arch/arm/kernel/entry-armv.S | 213
-rw-r--r--  arch/arm/kernel/traps.c      |  58
-rw-r--r--  arch/arm/mm/Kconfig          |  14

3 files changed, 276 insertions, 9 deletions
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 2a5c3fe09a95..080df907f242 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -269,6 +269,12 @@ __pabt_svc:
 	add	r5, sp, #S_PC
 	ldmia	r7, {r2 - r4}			@ Get USR pc, cpsr
 
+#if __LINUX_ARM_ARCH__ < 6
+	@ make sure our user space atomic helper is aborted
+	cmp	r2, #VIRT_OFFSET
+	bichs	r3, r3, #PSR_Z_BIT
+#endif
+
 	@
 	@ We are now ready to fill in the remaining blanks on the stack:
 	@
@@ -499,8 +505,12 @@ ENTRY(__switch_to)
 	mra	r4, r5, acc0
 	stmia	ip, {r4, r5}
 #endif
+#ifdef CONFIG_HAS_TLS_REG
+	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
+#else
 	mov	r4, #0xffff0fff
-	str	r3, [r4, #-3]			@ Set TLS ptr
+	str	r3, [r4, #-15]			@ TLS val at 0xffff0ff0
+#endif
 	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
 #ifdef CONFIG_VFP
 	@ Always disable VFP so we can lazily save/restore the old
@@ -519,6 +529,207 @@ ENTRY(__switch_to)
 	ldmib	r2, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
 
 	__INIT
+
+/*
+ * User helpers.
+ *
+ * These are segments of kernel-provided user code reachable from user space
+ * at a fixed address in kernel memory.  This is used to provide user space
+ * with some operations which require kernel help because of unimplemented
+ * native features and/or instructions in many ARM CPUs.  The idea is for
+ * this code to be executed directly in user mode for best efficiency, but
+ * it is too intimate with the kernel counterpart to be left to user
+ * libraries.  In fact this code might even differ from one CPU to another
+ * depending on the available instruction set, or restrictions such as on
+ * SMP systems.  In other words, the kernel reserves the right to change
+ * this code as needed without warning.  Only the entry points and their
+ * results are guaranteed to be stable.
+ *
+ * Each segment is 32-byte aligned and will be moved to the top of the high
+ * vector page.  New segments (if ever needed) must be added in front of
+ * existing ones.  This mechanism should be used only for things that are
+ * really small and justified, and not be abused freely.
+ *
+ * User space is expected to implement these things inline when optimizing
+ * for a processor that has the necessary native support, but only if the
+ * resulting binaries are already going to be incompatible with earlier ARM
+ * processors due to the use of unsupported instructions other than what
+ * is provided here.  In other words, don't make binaries unable to run on
+ * earlier processors just for the sake of avoiding these kernel helpers
+ * if your compiled code is not going to use the new instructions for other
+ * purposes.
+ */
+
+	.align	5
+	.globl	__kuser_helper_start
+__kuser_helper_start:
+
+/*
+ * Reference prototype:
+ *
+ *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
+ *
+ * Input:
+ *
+ *	r0 = oldval
+ *	r1 = newval
+ *	r2 = ptr
+ *	lr = return address
+ *
+ * Output:
+ *
+ *	r0 = returned value (zero or non-zero)
+ *	C flag = set if r0 == 0, clear if r0 != 0
+ *
+ * Clobbered:
+ *
+ *	r3, ip, flags
+ *
+ * Definition and user space usage example:
+ *
+ *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
+ *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
+ *
+ * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
+ * Return zero if *ptr was changed or non-zero if no exchange happened.
+ * The C flag is also set if *ptr was changed to allow for assembly
+ * optimization in the calling code.
+ *
+ * For example, a user space atomic_add implementation could look like this:
+ *
+ * #define atomic_add(ptr, val) \
+ *	({ register unsigned int *__ptr asm("r2") = (ptr); \
+ *	   register unsigned int __result asm("r1"); \
+ *	   asm volatile ( \
+ *	       "1: @ atomic_add\n\t" \
+ *	       "ldr	r0, [r2]\n\t" \
+ *	       "mov	r3, #0xffff0fff\n\t" \
+ *	       "add	lr, pc, #4\n\t" \
+ *	       "add	r1, r0, %2\n\t" \
+ *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
+ *	       "bcc	1b" \
+ *	       : "=&r" (__result) \
+ *	       : "r" (__ptr), "rIL" (val) \
+ *	       : "r0","r3","ip","lr","cc","memory" ); \
+ *	   __result; })
+ */
+
+__kuser_cmpxchg:				@ 0xffff0fc0
+
+#if __LINUX_ARM_ARCH__ < 6
+
+#ifdef CONFIG_SMP  /* sanity check */
+#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?"
+#endif
+
+	/*
+	 * Theory of operation:
+	 *
+	 * We set the Z flag before loading oldval.  If ever an exception
+	 * occurs we cannot be sure the loaded value will still be the same
+	 * when the exception returns, therefore the user exception handler
+	 * will clear the Z flag whenever the interrupted user code was
+	 * actually from the kernel address space (see the usr_entry macro).
+	 *
+	 * The address writeback on the str is used to prevent a race with
+	 * an exception happening just after the str instruction, which
+	 * would clear the Z flag although the exchange was done.
+	 */
+	teq	ip, ip			@ set Z flag
+	ldr	ip, [r2]		@ load current val
+	add	r3, r2, #1		@ prepare store ptr
+	teqeq	ip, r0			@ compare with oldval if still allowed
+	streq	r1, [r3, #-1]!		@ store newval if still allowed
+	subs	r0, r2, r3		@ if r2 == r3 the str occurred
+	mov	pc, lr
+
+#else
+
+	ldrex	r3, [r2]
+	subs	r3, r3, r0
+	strexeq	r3, r1, [r2]
+	rsbs	r0, r3, #0
+	mov	pc, lr
+
+#endif
+
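The net effect for user space is a compare-and-swap primitive at a fixed address. A minimal C sketch of how a program might build an atomic increment on top of it (illustrative only, not part of the patch; semantics exactly as documented in the comment above):

	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

	/* retry until the compare-and-swap succeeds (returns zero) */
	static void atomic_increment(int *ctr)
	{
		int old;
		do {
			old = *ctr;	/* snapshot the current value */
		} while (__kernel_cmpxchg(old, old + 1, ctr) != 0);
	}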
+	.align	5
+
+/*
+ * Reference prototype:
+ *
+ *	int __kernel_get_tls(void)
+ *
+ * Input:
+ *
+ *	lr = return address
+ *
+ * Output:
+ *
+ *	r0 = TLS value
+ *
+ * Clobbered:
+ *
+ *	the Z flag might be lost
+ *
+ * Definition and user space usage example:
+ *
+ *	typedef int (__kernel_get_tls_t)(void);
+ *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
+ *
+ * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
+ *
+ * This could be used as follows:
+ *
+ * #define __kernel_get_tls() \
+ *	({ register unsigned int __val asm("r0"); \
+ *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
+ *	        : "=r" (__val) : : "lr","cc" ); \
+ *	   __val; })
+ */
+
+__kuser_get_tls:				@ 0xffff0fe0
+
+#ifndef CONFIG_HAS_TLS_REG
+
+#ifdef CONFIG_SMP  /* sanity check */
+#error "CONFIG_SMP without CONFIG_HAS_TLS_REG is wrong"
+#endif
+
+	ldr	r0, [pc, #(16 - 8)]	@ TLS stored at 0xffff0ff0
+	mov	pc, lr
+
+#else
+
+	mrc	p15, 0, r0, c13, c0, 3	@ read TLS register
+	mov	pc, lr
+
+#endif
+
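Calling this helper from C is simpler than the asm variant shown in the comment; a minimal sketch using only the fixed entry address documented above (illustrative, not part of the patch):

	typedef int (__kernel_get_tls_t)(void);
	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)

	/* returns whatever was last set via the set_tls private syscall */
	static void *get_thread_pointer(void)
	{
		return (void *)__kernel_get_tls();
	}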
+	.rep	5
+	.word	0			@ pad up to __kuser_helper_version
+	.endr
+
+/*
+ * Reference declaration:
+ *
+ *	extern unsigned int __kernel_helper_version;
+ *
+ * Definition and user space usage example:
+ *
+ *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
+ *
+ * User space may read this to determine the current number of helpers
+ * available.
+ */
+
+__kuser_helper_version:				@ 0xffff0ffc
+	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
+
+	.globl	__kuser_helper_end
+__kuser_helper_end:
+
+
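Since the version word is the size of the helper area shifted right by 5, and each helper occupies a 32-byte slot, the two helpers in this patch yield a version of 2. A sketch of how user space might gate on it before relying on a helper (illustrative; the threshold of 2 assumes this patch's helper count):

	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)

	/* both __kernel_get_tls and __kernel_cmpxchg are present from 2 up */
	static int have_kuser_helpers(void)
	{
		return __kernel_helper_version >= 2;
	}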
 /*
  * Vector stubs.
  *
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 0078aeb85737..3a001fe5540b 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -450,13 +450,17 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 
 	case NR(set_tls):
 		thread->tp_value = regs->ARM_r0;
+#ifdef CONFIG_HAS_TLS_REG
+		asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0) );
+#else
 		/*
-		 * Our user accessible TLS ptr is located at 0xffff0ffc.
-		 * On SMP read access to this address must raise a fault
-		 * and be emulated from the data abort handler.
-		 * m
+		 * User space must never try to access this directly.
+		 * Expect your app to break eventually if you do so.
+		 * The user helper at 0xffff0fe0 must be used instead.
+		 * (see entry-armv.S for details)
 		 */
-		*((unsigned long *)0xffff0ffc) = thread->tp_value;
+		*((unsigned int *)0xffff0ff0) = regs->ARM_r0;
+#endif
 		return 0;
 
 	default:
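For reference, the user space side of set_tls goes through the private ARM syscall. A sketch (illustrative; in practice the threading library does this, and __ARM_NR_set_tls comes from the kernel's asm/unistd.h):

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/unistd.h>

	/* hand the thread pointer to the kernel; read it back via 0xffff0fe0 */
	static long set_thread_pointer(void *tp)
	{
		return syscall(__ARM_NR_set_tls, tp);
	}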
@@ -493,6 +497,41 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 	return 0;
 }
 
+#if defined(CONFIG_CPU_32v6) && !defined(CONFIG_HAS_TLS_REG)
+
+/*
+ * We might be running on an ARMv6+ processor which should have the TLS
+ * register, but for some reason we can't use it and have to emulate it.
+ */
+
+static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
+{
+	int reg = (instr >> 12) & 15;
+	if (reg == 15)
+		return 1;
+	regs->uregs[reg] = current_thread_info()->tp_value;
+	regs->ARM_pc += 4;
+	return 0;
+}
+
+static struct undef_hook arm_mrc_hook = {
+	.instr_mask	= 0x0fff0fff,
+	.instr_val	= 0x0e1d0f70,
+	.cpsr_mask	= PSR_T_BIT,
+	.cpsr_val	= 0,
+	.fn		= get_tp_trap,
+};
+
+static int __init arm_mrc_hook_init(void)
+{
+	register_undef_hook(&arm_mrc_hook);
+	return 0;
+}
+
+late_initcall(arm_mrc_hook_init);
+
+#endif
+
 void __bad_xchg(volatile void *ptr, int size)
 {
 	printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
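The mask/value pair matches "mrc p15, 0, rD, c13, c0, 3" for any destination register rD and any condition code, since instr_mask clears bits [31:28] (cond) and [15:12] (rD); rD == 15 is rejected by the handler itself. A quick host-side sketch verifying the decode (illustrative):

	#include <assert.h>

	int main(void)
	{
		unsigned int rd;
		for (rd = 0; rd < 16; rd++) {
			/* AL-conditional mrc p15, 0, rRD, c13, c0, 3 */
			unsigned int instr = 0xee1d0f70 | (rd << 12);
			assert((instr & 0x0fff0fff) == 0x0e1d0f70);
		}
		return 0;
	}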
@@ -580,14 +619,17 @@ void __init trap_init(void)
 {
 	extern char __stubs_start[], __stubs_end[];
 	extern char __vectors_start[], __vectors_end[];
+	extern char __kuser_helper_start[], __kuser_helper_end[];
+	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
 
 	/*
-	 * Copy the vectors and stubs (in entry-armv.S) into the
-	 * vector page, mapped at 0xffff0000, and ensure these are
-	 * visible to the instruction stream.
+	 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
+	 * into the vector page, mapped at 0xffff0000, and ensure these
+	 * are visible to the instruction stream.
 	 */
 	memcpy((void *)0xffff0000, __vectors_start, __vectors_end - __vectors_start);
 	memcpy((void *)0xffff0200, __stubs_start, __stubs_end - __stubs_start);
+	memcpy((void *)0xffff1000 - kuser_sz, __kuser_helper_start, kuser_sz);
 	flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE);
 	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 }
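With kuser_sz at 64 bytes for the two helpers above, the last memcpy places the helper area at 0xffff0fc0, giving the vector page its fixed user-visible layout. A sketch of the resulting addressing rule (illustrative; kuser_helper_addr is a hypothetical helper, with helper #1 = get_tls and #2 = cmpxchg counted down from the top of the page):

	/* vectors at 0xffff0000, stubs at 0xffff0200, helpers at the top:
	 * helper #n sits n 32-byte slots below 0xffff1000, and the
	 * version word occupies the last word of the page at 0xffff0ffc */
	static unsigned long kuser_helper_addr(unsigned int n)
	{
		return 0xffff1000UL - 32UL * n;
	}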
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 5b670c9ac5ef..007766a0644c 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -409,3 +409,17 @@ config CPU_BPREDICT_DISABLE
 	depends on CPU_ARM1020
 	help
 	  Say Y here to disable branch prediction. If unsure, say N.
+
+config HAS_TLS_REG
+	bool
+	depends on CPU_32v6 && !CPU_32v5 && !CPU_32v4 && !CPU_32v3
+	help
+	  This selects support for the CP15 thread register.
+	  It is defined to be available on ARMv6 or later.  However,
+	  if the kernel is configured to support multiple CPUs including
+	  a pre-ARMv6 processor, or if a given ARMv6 processor doesn't
+	  implement the thread register for some reason, then access to
+	  this register from user space must be trapped and emulated.
+	  If user space is relying on the __kuser_get_tls code then
+	  there should not be any impact.
+