-rw-r--r--  arch/arm/boot/compressed/head.S  167
1 files changed, 99 insertions(+), 68 deletions(-)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 82f5fcfd9567..bd60e8369879 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -140,7 +140,8 @@ start:
 		tst	r2, #3			@ not user?
 		bne	not_angel
 		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
-		swi	0x123456		@ angel_SWI_ARM
+ ARM(		swi	0x123456	)	@ angel_SWI_ARM
+ THUMB(		svc	0xab		)	@ angel_SWI_THUMB
 not_angel:
 		mrs	r2, cpsr		@ turn off interrupts to
 		orr	r2, r2, #0xc0		@ prevent angel from running
@@ -161,7 +162,9 @@ not_angel:
 
 		.text
 		adr	r0, LC0
-		ldmia	r0, {r1, r2, r3, r4, r5, r6, ip, sp}
+ ARM(		ldmia	r0, {r1, r2, r3, r4, r5, r6, ip, sp} )
+ THUMB(		ldmia	r0, {r1, r2, r3, r4, r5, r6, ip} )
+ THUMB(		ldr	sp, [r0, #28] )
 		subs	r0, r0, r1		@ calculate the delta offset
 
 						@ if delta is zero, we are
@@ -263,22 +266,25 @@ not_relocated: mov r0, #0
  * r6     = processor ID
  * r7     = architecture ID
  * r8     = atags pointer
- * r9-r14 = corrupted
+ * r9-r12,r14 = corrupted
  */
 		add	r1, r5, r0		@ end of decompressed kernel
 		adr	r2, reloc_start
 		ldr	r3, LC1
 		add	r3, r2, r3
-1:		ldmia	r2!, {r9 - r14}		@ copy relocation code
-		stmia	r1!, {r9 - r14}
-		ldmia	r2!, {r9 - r14}
-		stmia	r1!, {r9 - r14}
+1:		ldmia	r2!, {r9 - r12, r14}	@ copy relocation code
+		stmia	r1!, {r9 - r12, r14}
+		ldmia	r2!, {r9 - r12, r14}
+		stmia	r1!, {r9 - r12, r14}
 		cmp	r2, r3
 		blo	1b
-		add	sp, r1, #128		@ relocate the stack
+		mov	sp, r1
+		add	sp, sp, #128		@ relocate the stack
 
 		bl	cache_clean_flush
-		add	pc, r5, r0		@ call relocation code
+ ARM(		add	pc, r5, r0	)	@ call relocation code
+ THUMB(		add	r12, r5, r0	)
+ THUMB(		mov	pc, r12		)	@ call relocation code
 
 /*
  * We're not in danger of overwriting ourselves. Do this the simple way.
@@ -499,6 +505,7 @@ __arm6_mmu_cache_on:
 		mov	pc, r12
 
 __common_mmu_cache_on:
+#ifndef CONFIG_THUMB2_KERNEL
 #ifndef DEBUG
 		orr	r0, r0, #0x000d		@ Write buffer, mmu
 #endif
@@ -510,6 +517,7 @@ __common_mmu_cache_on:
 1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
 		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
 		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
+#endif
 
 /*
  * All code following this line is relocatable. It is relocated by
@@ -523,7 +531,7 @@ __common_mmu_cache_on:
  * r6     = processor ID
  * r7     = architecture ID
  * r8     = atags pointer
- * r9-r14 = corrupted
+ * r9-r12,r14 = corrupted
  */
 		.align	5
 reloc_start:	add	r9, r5, r0
@@ -532,13 +540,14 @@ reloc_start: add r9, r5, r0
 		mov	r1, r4
 1:
 		.rept	4
-		ldmia	r5!, {r0, r2, r3, r10 - r14}	@ relocate kernel
-		stmia	r1!, {r0, r2, r3, r10 - r14}
+		ldmia	r5!, {r0, r2, r3, r10 - r12, r14}	@ relocate kernel
+		stmia	r1!, {r0, r2, r3, r10 - r12, r14}
 		.endr
 
 		cmp	r5, r9
 		blo	1b
-		add	sp, r1, #128		@ relocate the stack
+		mov	sp, r1
+		add	sp, sp, #128		@ relocate the stack
 		debug_reloc_end
 
 call_kernel:	bl	cache_clean_flush
@@ -572,7 +581,9 @@ call_cache_fn: adr r12, proc_types
 		ldr	r2, [r12, #4]		@ get mask
 		eor	r1, r1, r6		@ (real ^ match)
 		tst	r1, r2			@ & mask
-		addeq	pc, r12, r3		@ call cache function
+ ARM(		addeq	pc, r12, r3	)	@ call cache function
+ THUMB(		addeq	r12, r3		)
+ THUMB(		moveq	pc, r12		)	@ call cache function
 		add	r12, r12, #4*5
 		b	1b
 
@@ -595,9 +606,10 @@ call_cache_fn: adr r12, proc_types
 proc_types:
 		.word	0x41560600		@ ARM6/610
 		.word	0xffffffe0
-		b	__arm6_mmu_cache_off	@ works, but slow
-		b	__arm6_mmu_cache_off
+		W(b)	__arm6_mmu_cache_off	@ works, but slow
+		W(b)	__arm6_mmu_cache_off
 		mov	pc, lr
+ THUMB(		nop				)
 @		b	__arm6_mmu_cache_on	@ untested
 @		b	__arm6_mmu_cache_off
 @		b	__armv3_mmu_cache_flush
@@ -605,76 +617,84 @@ proc_types:
 		.word	0x00000000		@ old ARM ID
 		.word	0x0000f000
 		mov	pc, lr
+ THUMB(		nop				)
 		mov	pc, lr
+ THUMB(		nop				)
 		mov	pc, lr
+ THUMB(		nop				)
 
 		.word	0x41007000		@ ARM7/710
 		.word	0xfff8fe00
-		b	__arm7_mmu_cache_off
-		b	__arm7_mmu_cache_off
+		W(b)	__arm7_mmu_cache_off
+		W(b)	__arm7_mmu_cache_off
 		mov	pc, lr
+ THUMB(		nop				)
 
 		.word	0x41807200		@ ARM720T (writethrough)
 		.word	0xffffff00
-		b	__armv4_mmu_cache_on
-		b	__armv4_mmu_cache_off
+		W(b)	__armv4_mmu_cache_on
+		W(b)	__armv4_mmu_cache_off
 		mov	pc, lr
+ THUMB(		nop				)
 
 		.word	0x41007400		@ ARM74x
 		.word	0xff00ff00
-		b	__armv3_mpu_cache_on
-		b	__armv3_mpu_cache_off
-		b	__armv3_mpu_cache_flush
+		W(b)	__armv3_mpu_cache_on
+		W(b)	__armv3_mpu_cache_off
+		W(b)	__armv3_mpu_cache_flush
 
 		.word	0x41009400		@ ARM94x
 		.word	0xff00ff00
-		b	__armv4_mpu_cache_on
-		b	__armv4_mpu_cache_off
-		b	__armv4_mpu_cache_flush
+		W(b)	__armv4_mpu_cache_on
+		W(b)	__armv4_mpu_cache_off
+		W(b)	__armv4_mpu_cache_flush
 
 		.word	0x00007000		@ ARM7 IDs
 		.word	0x0000f000
 		mov	pc, lr
+ THUMB(		nop				)
 		mov	pc, lr
+ THUMB(		nop				)
 		mov	pc, lr
+ THUMB(		nop				)
 
 		@ Everything from here on will be the new ID system.
 
 		.word	0x4401a100		@ sa110 / sa1100
 		.word	0xffffffe0
-		b	__armv4_mmu_cache_on
-		b	__armv4_mmu_cache_off
-		b	__armv4_mmu_cache_flush
+		W(b)	__armv4_mmu_cache_on
+		W(b)	__armv4_mmu_cache_off
+		W(b)	__armv4_mmu_cache_flush
 
 		.word	0x6901b110		@ sa1110
 		.word	0xfffffff0
-		b	__armv4_mmu_cache_on
-		b	__armv4_mmu_cache_off
-		b	__armv4_mmu_cache_flush
+		W(b)	__armv4_mmu_cache_on
+		W(b)	__armv4_mmu_cache_off
+		W(b)	__armv4_mmu_cache_flush
 
 		.word	0x56056930
 		.word	0xff0ffff0		@ PXA935
-		b	__armv4_mmu_cache_on
-		b	__armv4_mmu_cache_off
-		b	__armv4_mmu_cache_flush
+		W(b)	__armv4_mmu_cache_on
+		W(b)	__armv4_mmu_cache_off
+		W(b)	__armv4_mmu_cache_flush
 
 		.word	0x56158000		@ PXA168
 		.word	0xfffff000
-		b	__armv4_mmu_cache_on
-		b	__armv4_mmu_cache_off
-		b	__armv5tej_mmu_cache_flush
+		W(b)	__armv4_mmu_cache_on
+		W(b)	__armv4_mmu_cache_off
+		W(b)	__armv5tej_mmu_cache_flush
 
 		.word	0x56056930
 		.word	0xff0ffff0		@ PXA935
-		b	__armv4_mmu_cache_on
-		b	__armv4_mmu_cache_off
-		b	__armv4_mmu_cache_flush
+		W(b)	__armv4_mmu_cache_on
+		W(b)	__armv4_mmu_cache_off
+		W(b)	__armv4_mmu_cache_flush
 
 		.word	0x56050000		@ Feroceon
 		.word	0xff0f0000
-		b	__armv4_mmu_cache_on
-		b	__armv4_mmu_cache_off
-		b	__armv5tej_mmu_cache_flush
+		W(b)	__armv4_mmu_cache_on
+		W(b)	__armv4_mmu_cache_off
+		W(b)	__armv5tej_mmu_cache_flush
 
 #ifdef CONFIG_CPU_FEROCEON_OLD_ID
 		/* this conflicts with the standard ARMv5TE entry */
@@ -687,47 +707,50 @@ proc_types:
 
 		.word	0x66015261		@ FA526
 		.word	0xff01fff1
-		b	__fa526_cache_on
-		b	__armv4_mmu_cache_off
-		b	__fa526_cache_flush
+		W(b)	__fa526_cache_on
+		W(b)	__armv4_mmu_cache_off
+		W(b)	__fa526_cache_flush
 
 		@ These match on the architecture ID
 
 		.word	0x00020000		@ ARMv4T
 		.word	0x000f0000
-		b	__armv4_mmu_cache_on
-		b	__armv4_mmu_cache_off
-		b	__armv4_mmu_cache_flush
+		W(b)	__armv4_mmu_cache_on
+		W(b)	__armv4_mmu_cache_off
+		W(b)	__armv4_mmu_cache_flush
 
 		.word	0x00050000		@ ARMv5TE
 		.word	0x000f0000
-		b	__armv4_mmu_cache_on
-		b	__armv4_mmu_cache_off
-		b	__armv4_mmu_cache_flush
+		W(b)	__armv4_mmu_cache_on
+		W(b)	__armv4_mmu_cache_off
+		W(b)	__armv4_mmu_cache_flush
 
 		.word	0x00060000		@ ARMv5TEJ
 		.word	0x000f0000
-		b	__armv4_mmu_cache_on
-		b	__armv4_mmu_cache_off
-		b	__armv5tej_mmu_cache_flush
+		W(b)	__armv4_mmu_cache_on
+		W(b)	__armv4_mmu_cache_off
+		W(b)	__armv4_mmu_cache_flush
 
 		.word	0x0007b000		@ ARMv6
 		.word	0x000ff000
-		b	__armv4_mmu_cache_on
-		b	__armv4_mmu_cache_off
-		b	__armv6_mmu_cache_flush
+		W(b)	__armv4_mmu_cache_on
+		W(b)	__armv4_mmu_cache_off
+		W(b)	__armv6_mmu_cache_flush
 
 		.word	0x000f0000		@ new CPU Id
 		.word	0x000f0000
-		b	__armv7_mmu_cache_on
-		b	__armv7_mmu_cache_off
-		b	__armv7_mmu_cache_flush
+		W(b)	__armv7_mmu_cache_on
+		W(b)	__armv7_mmu_cache_off
+		W(b)	__armv7_mmu_cache_flush
 
 		.word	0			@ unrecognised type
 		.word	0
 		mov	pc, lr
+ THUMB(		nop				)
 		mov	pc, lr
+ THUMB(		nop				)
 		mov	pc, lr
+ THUMB(		nop				)
 
 		.size	proc_types, . - proc_types
 
@@ -854,7 +877,7 @@ __armv7_mmu_cache_flush:
 		b	iflush
 hierarchical:
 		mcr	p15, 0, r10, c7, c10, 5	@ DMB
-		stmfd	sp!, {r0-r5, r7, r9, r11}
+		stmfd	sp!, {r0-r7, r9-r11}
 		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
 		ands	r3, r0, #0x7000000	@ extract loc from clidr
 		mov	r3, r3, lsr #23		@ left align loc bit field
@@ -879,8 +902,12 @@ loop1:
 loop2:
 		mov	r9, r4			@ create working copy of max way size
 loop3:
-		orr	r11, r10, r9, lsl r5	@ factor way and cache number into r11
-		orr	r11, r11, r7, lsl r2	@ factor index number into r11
+ ARM(		orr	r11, r10, r9, lsl r5	)	@ factor way and cache number into r11
+ ARM(		orr	r11, r11, r7, lsl r2	)	@ factor index number into r11
+ THUMB(		lsl	r6, r9, r5		)
+ THUMB(		orr	r11, r10, r6		)	@ factor way and cache number into r11
+ THUMB(		lsl	r6, r7, r2		)
+ THUMB(		orr	r11, r11, r6		)	@ factor index number into r11
 		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
 		subs	r9, r9, #1		@ decrement the way
 		bge	loop3
@@ -891,7 +918,7 @@ skip:
 		cmp	r3, r10
 		bgt	loop1
 finished:
-		ldmfd	sp!, {r0-r5, r7, r9, r11}
+		ldmfd	sp!, {r0-r7, r9-r11}
 		mov	r10, #0			@ swith back to cache level 0
 		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
 iflush:
@@ -925,9 +952,13 @@ __armv4_mmu_cache_flush:
 		mov	r11, #8
 		mov	r11, r11, lsl r3	@ cache line size in bytes
 no_cache_id:
-		bic	r1, pc, #63		@ align to longest cache line
+		mov	r1, pc
+		bic	r1, r1, #63		@ align to longest cache line
 		add	r2, r1, r2
-1:		ldr	r3, [r1], r11		@ s/w flush D cache
+1:
+ ARM(		ldr	r3, [r1], r11	)	@ s/w flush D cache
+ THUMB(		ldr	r3, [r1]	)	@ s/w flush D cache
+ THUMB(		add	r1, r1, r11	)
 		teq	r1, r2
 		bne	1b
 
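Note on the annotations used above: the ARM(), THUMB() and W() wrappers come from the kernel's unified-assembler support (arch/arm/include/asm/unified.h), not from this file. As a rough sketch of how they are typically defined, assuming CONFIG_THUMB2_KERNEL selects the Thumb-2 build (see the header itself for the authoritative version):

/* Sketch only: ARM()-wrapped lines are emitted for ARM builds, THUMB()-wrapped
 * lines for Thumb-2 builds, and W() forces the 32-bit (wide) encoding of an
 * instruction that would otherwise assemble to a 16-bit Thumb encoding.
 */
#ifdef CONFIG_THUMB2_KERNEL
#define ARM(x...)
#define THUMB(x...)	x
#define W(instr)	instr.w
#else
#define ARM(x...)	x
#define THUMB(x...)
#define W(instr)	instr
#endif

This is also why the proc_types changes look the way they do: call_cache_fn indexes into the table with "addeq pc, r12, r3" and steps through it with "add r12, r12, #4*5", so every slot must stay exactly one 32-bit word in both instruction sets. Plain branches therefore become W(b) to force a wide encoding, and the 16-bit Thumb "mov pc, lr" stubs are padded out with THUMB( nop ).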