aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm
diff options
context:
space:
mode:
authorRussell King <rmk+kernel@arm.linux.org.uk>2012-05-10 04:48:34 -0400
committerRussell King <rmk+kernel@arm.linux.org.uk>2012-05-10 04:48:34 -0400
commit1fdc08abfa26f30fcef0ce1333e9ac6f80350f30 (patch)
treed54c5778e0a74dbea37904ac9106604a22985c36 /arch/arm
parentbd0493eaaf5c7a1ea00786d46cc2f4af44e76f28 (diff)
ARM: decompressor: avoid speculative prefetch from non-RAM areas
We set up identity MMU mappings across the entire 4GB of space, which are permissionless because the domain is set to manager. This unfortunately allows ARMv6 and later CPUs to speculatively prefetch from the entire address space, which can cause undesirable side effects if those regions contain devices. As we set up the mappings with read/write permission, we can switch the domain to client mode, and then use the XN bit for ARMv6 and above to control speculative prefetch to non-RAM areas. Reported-by: R Sricharan <r.sricharan@ti.com> Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm')
-rw-r--r--arch/arm/boot/compressed/head.S29
1 file changed, 18 insertions, 11 deletions
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index dc7e8ce8e6be..5ad33a4df675 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -567,6 +567,12 @@ __armv3_mpu_cache_on:
567 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 567 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
568 mov pc, lr 568 mov pc, lr
569 569
570#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
571#define CB_BITS 0x08
572#else
573#define CB_BITS 0x0c
574#endif
575
570__setup_mmu: sub r3, r4, #16384 @ Page directory size 576__setup_mmu: sub r3, r4, #16384 @ Page directory size
571 bic r3, r3, #0xff @ Align the pointer 577 bic r3, r3, #0xff @ Align the pointer
572 bic r3, r3, #0x3f00 578 bic r3, r3, #0x3f00
@@ -578,17 +584,14 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size
578 mov r9, r0, lsr #18 584 mov r9, r0, lsr #18
579 mov r9, r9, lsl #18 @ start of RAM 585 mov r9, r9, lsl #18 @ start of RAM
580 add r10, r9, #0x10000000 @ a reasonable RAM size 586 add r10, r9, #0x10000000 @ a reasonable RAM size
581 mov r1, #0x12 587 mov r1, #0x12 @ XN|U + section mapping
582 orr r1, r1, #3 << 10 588 orr r1, r1, #3 << 10 @ AP=11
583 add r2, r3, #16384 589 add r2, r3, #16384
5841: cmp r1, r9 @ if virt > start of RAM 5901: cmp r1, r9 @ if virt > start of RAM
585#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH 591 cmphs r10, r1 @ && end of RAM > virt
586 orrhs r1, r1, #0x08 @ set cacheable 592 bic r1, r1, #0x1c @ clear XN|U + C + B
587#else 593 orrlo r1, r1, #0x10 @ Set XN|U for non-RAM
588 orrhs r1, r1, #0x0c @ set cacheable, bufferable 594 orrhs r1, r1, r6 @ set RAM section settings
589#endif
590 cmp r1, r10 @ if virt > end of RAM
591 bichs r1, r1, #0x0c @ clear cacheable, bufferable
592 str r1, [r0], #4 @ 1:1 mapping 595 str r1, [r0], #4 @ 1:1 mapping
593 add r1, r1, #1048576 596 add r1, r1, #1048576
594 teq r0, r2 597 teq r0, r2
@@ -599,7 +602,7 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size
599 * so there is no map overlap problem for up to 1 MB compressed kernel. 602 * so there is no map overlap problem for up to 1 MB compressed kernel.
600 * If the execution is in RAM then we would only be duplicating the above. 603 * If the execution is in RAM then we would only be duplicating the above.
601 */ 604 */
602 mov r1, #0x1e 605 orr r1, r6, #0x04 @ ensure B is set for this
603 orr r1, r1, #3 << 10 606 orr r1, r1, #3 << 10
604 mov r2, pc 607 mov r2, pc
605 mov r2, r2, lsr #20 608 mov r2, r2, lsr #20
@@ -620,6 +623,7 @@ __arm926ejs_mmu_cache_on:
620__armv4_mmu_cache_on: 623__armv4_mmu_cache_on:
621 mov r12, lr 624 mov r12, lr
622#ifdef CONFIG_MMU 625#ifdef CONFIG_MMU
626 mov r6, #CB_BITS | 0x12 @ U
623 bl __setup_mmu 627 bl __setup_mmu
624 mov r0, #0 628 mov r0, #0
625 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 629 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
@@ -641,6 +645,7 @@ __armv7_mmu_cache_on:
641#ifdef CONFIG_MMU 645#ifdef CONFIG_MMU
642 mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0 646 mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0
643 tst r11, #0xf @ VMSA 647 tst r11, #0xf @ VMSA
648 movne r6, #CB_BITS | 0x02 @ !XN
644 blne __setup_mmu 649 blne __setup_mmu
645 mov r0, #0 650 mov r0, #0
646 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 651 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
@@ -655,7 +660,7 @@ __armv7_mmu_cache_on:
655 orr r0, r0, #1 << 25 @ big-endian page tables 660 orr r0, r0, #1 << 25 @ big-endian page tables
656#endif 661#endif
657 orrne r0, r0, #1 @ MMU enabled 662 orrne r0, r0, #1 @ MMU enabled
658 movne r1, #-1 663 movne r1, #0xfffffffd @ domain 0 = client
659 mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer 664 mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
660 mcrne p15, 0, r1, c3, c0, 0 @ load domain access control 665 mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
661#endif 666#endif
@@ -668,6 +673,7 @@ __armv7_mmu_cache_on:
668 673
669__fa526_cache_on: 674__fa526_cache_on:
670 mov r12, lr 675 mov r12, lr
676 mov r6, #CB_BITS | 0x12 @ U
671 bl __setup_mmu 677 bl __setup_mmu
672 mov r0, #0 678 mov r0, #0
673 mcr p15, 0, r0, c7, c7, 0 @ Invalidate whole cache 679 mcr p15, 0, r0, c7, c7, 0 @ Invalidate whole cache
@@ -682,6 +688,7 @@ __fa526_cache_on:
682 688
683__arm6_mmu_cache_on: 689__arm6_mmu_cache_on:
684 mov r12, lr 690 mov r12, lr
691 mov r6, #CB_BITS | 0x12 @ U
685 bl __setup_mmu 692 bl __setup_mmu
686 mov r0, #0 693 mov r0, #0
687 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 694 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3