author    Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-11-27 07:42:48 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>	2008-11-27 07:42:48 -0500
commit    f412b09f4ed7c57f5b8935ed7d6fc786f402a629 (patch)
tree      34fe1b4b64db4993e9fb21a70812fafed0437870 /arch/arm
parent    31bccbf39208133415000520c79ebe7b291786df (diff)
parent    7f1fd31db158c95418d9cc5690ab60ecc6fb632d (diff)
Merge branch 'for-rmk' of git://linux-arm.org/linux-2.6 into devel
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/boot/compressed/head.S   | 14
-rw-r--r--  arch/arm/include/asm/cacheflush.h | 36
-rw-r--r--  arch/arm/include/asm/hwcap.h      |  1
-rw-r--r--  arch/arm/kernel/setup.c           |  2
-rw-r--r--  arch/arm/kernel/thumbee.c         |  2
-rw-r--r--  arch/arm/mm/cache-v7.S            |  2
-rw-r--r--  arch/arm/mm/proc-v6.S             |  2
-rw-r--r--  arch/arm/mm/proc-v7.S             | 19
-rw-r--r--  arch/arm/vfp/vfphw.S              | 27
-rw-r--r--  arch/arm/vfp/vfpmodule.c          |  9
10 files changed, 87 insertions(+), 27 deletions(-)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 84a1e0496a3c..7b1f31295a0a 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -717,6 +717,9 @@ __armv7_mmu_cache_off:
 		bl	__armv7_mmu_cache_flush
 		mov	r0, #0
 		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
+		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
+		mcr	p15, 0, r0, c7, c10, 4	@ DSB
+		mcr	p15, 0, r0, c7, c5, 4	@ ISB
 		mov	pc, r12
 
 __arm6_mmu_cache_off:
@@ -778,12 +781,13 @@ __armv6_mmu_cache_flush:
 __armv7_mmu_cache_flush:
 		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
 		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
-		beq	hierarchical
 		mov	r10, #0
+		beq	hierarchical
 		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
 		b	iflush
 hierarchical:
-		stmfd	sp!, {r0-r5, r7, r9-r11}
+		mcr	p15, 0, r10, c7, c10, 5	@ DMB
+		stmfd	sp!, {r0-r5, r7, r9, r11}
 		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
 		ands	r3, r0, #0x7000000	@ extract loc from clidr
 		mov	r3, r3, lsr #23		@ left align loc bit field
@@ -820,12 +824,14 @@ skip:
 		cmp	r3, r10
 		bgt	loop1
 finished:
+		ldmfd	sp!, {r0-r5, r7, r9, r11}
 		mov	r10, #0			@ swith back to cache level 0
 		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
-		ldmfd	sp!, {r0-r5, r7, r9-r11}
 iflush:
+		mcr	p15, 0, r10, c7, c10, 4	@ DSB
 		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
-		mcr	p15, 0, r10, c7, c10, 4	@ drain WB
+		mcr	p15, 0, r10, c7, c10, 4	@ DSB
+		mcr	p15, 0, r10, c7, c5, 4	@ ISB
 		mov	pc, lr
 
 __armv5tej_mmu_cache_flush:
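The hierarchical path above walks the cache levels advertised by CLIDR, cleaning and invalidating each data/unified level by set/way before the I-cache and branch-predictor flush at iflush. As a rough orientation aid only, the loop structure corresponds to the following C sketch (read_clidr and clean_inv_dcache_level are hypothetical helpers, not kernel API):

	/* sketch of the CLIDR walk in __armv7_mmu_cache_flush */
	unsigned int clidr = read_clidr();	/* mrc p15, 1, <Rt>, c0, c0, 1 */
	unsigned int loc = (clidr >> 24) & 0x7;	/* Level of Coherency, CLIDR[26:24] */
	unsigned int level;

	for (level = 0; level < loc; level++) {
		unsigned int ctype = (clidr >> (level * 3)) & 0x7;
		if (ctype < 2)
			continue;		/* no cache, or I-cache only, here */
		clean_inv_dcache_level(level);	/* set/way loop over this level */
	}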
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index de6c59f814a1..85a2514cbffc 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -15,6 +15,7 @@
 
 #include <asm/glue.h>
 #include <asm/shmparam.h>
+#include <asm/cachetype.h>
 
 #define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
@@ -296,16 +297,6 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
 #endif
 
 /*
- * flush_cache_vmap() is used when creating mappings (eg, via vmap,
- * vmalloc, ioremap etc) in kernel space for pages.  Since the
- * direct-mappings of these pages may contain cached data, we need
- * to do a full cache flush to ensure that writebacks don't corrupt
- * data placed into these pages via the new mappings.
- */
-#define flush_cache_vmap(start, end)		flush_cache_all()
-#define flush_cache_vunmap(start, end)		flush_cache_all()
-
-/*
  * Copy user data from/to a page which is mapped into a different
  * processes address space.  Really, we want to allow our "user
  * space" model to handle this.
@@ -444,4 +435,29 @@ static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
 	dmac_inv_range(start, start + size);
 }
 
+/*
+ * flush_cache_vmap() is used when creating mappings (eg, via vmap,
+ * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
+ * caches, since the direct-mappings of these pages may contain cached
+ * data, we need to do a full cache flush to ensure that writebacks
+ * don't corrupt data placed into these pages via the new mappings.
+ */
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+	if (!cache_is_vipt_nonaliasing())
+		flush_cache_all();
+	else
+		/*
+		 * set_pte_at() called from vmap_pte_range() does not
+		 * have a DSB after cleaning the cache line.
+		 */
+		dsb();
+}
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+	if (!cache_is_vipt_nonaliasing())
+		flush_cache_all();
+}
+
 #endif
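The rewritten flush_cache_vmap()/flush_cache_vunmap() run at the tail of kernel mappings created with vmap(), vmalloc() or ioremap(); the cache_is_vipt_nonaliasing() test they rely on is what the new <asm/cachetype.h> include at the top of this header supplies. A minimal caller, sketched for illustration only:

	#include <linux/vmalloc.h>

	/* map an array of already-allocated struct page pointers;
	 * vmap() invokes flush_cache_vmap() on the new range */
	void *virt = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (virt) {
		/* ... use the mapping ... */
		vunmap(virt);	/* flush_cache_vunmap() runs on teardown */
	}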
diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
index 81f4c899a555..bda489f9f017 100644
--- a/arch/arm/include/asm/hwcap.h
+++ b/arch/arm/include/asm/hwcap.h
@@ -16,6 +16,7 @@
 #define HWCAP_IWMMXT	512
 #define HWCAP_CRUNCH	1024
 #define HWCAP_THUMBEE	2048
+#define HWCAP_NEON	4096
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 /*
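Userspace sees the new bit through the ELF auxiliary vector. A minimal check, assuming a libc that provides getauxval() (glibc 2.16+; older programs parse /proc/self/auxv instead):

	#include <stdio.h>
	#include <sys/auxv.h>		/* getauxval(), AT_HWCAP */

	#define HWCAP_NEON 4096		/* matches the kernel definition above */

	int main(void)
	{
		unsigned long hwcap = getauxval(AT_HWCAP);
		printf("NEON: %s\n", (hwcap & HWCAP_NEON) ? "present" : "absent");
		return 0;
	}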
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 1f1eecca7f55..d4dae3e9b294 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -772,6 +772,8 @@ static const char *hwcap_str[] = {
772 "java", 772 "java",
773 "iwmmxt", 773 "iwmmxt",
774 "crunch", 774 "crunch",
775 "thumbee",
776 "neon",
775 NULL 777 NULL
776}; 778};
777 779
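These strings surface in the Features line of /proc/cpuinfo, so a CPU with both capabilities would report something along the lines of (illustrative only; the full list depends on the CPU's other hwcaps):

	Features	: swp half thumb fastmult vfp edsp thumbee neon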
diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
index df3f6b7ebcea..9cb7aaca159f 100644
--- a/arch/arm/kernel/thumbee.c
+++ b/arch/arm/kernel/thumbee.c
@@ -25,7 +25,7 @@
 /*
  * Access to the ThumbEE Handler Base register
  */
-static inline unsigned long teehbr_read()
+static inline unsigned long teehbr_read(void)
 {
 	unsigned long v;
 	asm("mrc p14, 6, %0, c1, c0, 0\n" : "=r" (v));
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index d19c2bec2b1f..be93ff02a98d 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -26,6 +26,7 @@
  *	- mm    - mm_struct describing address space
  */
 ENTRY(v7_flush_dcache_all)
+	dmb					@ ensure ordering with previous memory accesses
 	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
 	ands	r3, r0, #0x7000000		@ extract loc from clidr
 	mov	r3, r3, lsr #23			@ left align loc bit field
@@ -64,6 +65,7 @@ skip:
 finished:
 	mov	r10, #0				@ swith back to cache level 0
 	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
+	dsb
 	isb
 	mov	pc, lr
 ENDPROC(v7_flush_dcache_all)
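The dmb/dsb/isb mnemonics added here perform the same operations as the CP15 forms used in the compressed/head.S hunk earlier; as a sketch, the equivalences (taking the encodings from that hunk's own comments) expressed as inline asm are:

	/* ARMv7 barriers via their CP15 encodings (sketch only) */
	#define dmb()	asm volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory")
	#define dsb()	asm volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory")
	#define isb()	asm volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")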
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 294943b85973..f0cc599facb7 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -71,6 +71,8 @@ ENTRY(cpu_v6_reset)
  *	IRQs are already disabled.
  */
 ENTRY(cpu_v6_do_idle)
+	mov	r1, #0
+	mcr	p15, 0, r1, c7, c10, 4		@ DWB - WFI may enter a low-power mode
 	mcr	p15, 0, r1, c7, c0, 4		@ wait for interrupt
 	mov	pc, lr
 
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 4d3c0a73e7fb..d1ebec42521d 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -20,9 +20,17 @@
 
 #define TTB_C		(1 << 0)
 #define TTB_S		(1 << 1)
+#define TTB_RGN_NC	(0 << 3)
+#define TTB_RGN_OC_WBWA	(1 << 3)
 #define TTB_RGN_OC_WT	(2 << 3)
 #define TTB_RGN_OC_WB	(3 << 3)
 
+#ifndef CONFIG_SMP
+#define TTB_FLAGS	TTB_C|TTB_RGN_OC_WB		@ mark PTWs cacheable, outer WB
+#else
+#define TTB_FLAGS	TTB_C|TTB_S|TTB_RGN_OC_WBWA	@ mark PTWs cacheable and shared, outer WBWA
+#endif
+
 ENTRY(cpu_v7_proc_init)
 	mov	pc, lr
 ENDPROC(cpu_v7_proc_init)
@@ -55,6 +63,7 @@ ENDPROC(cpu_v7_reset)
  *	IRQs are already disabled.
  */
 ENTRY(cpu_v7_do_idle)
+	dsb					@ WFI may enter a low-power mode
 	wfi
 	mov	pc, lr
 ENDPROC(cpu_v7_do_idle)
@@ -85,7 +94,7 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
 	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
-	orr	r0, r0, #TTB_RGN_OC_WB		@ mark PTWs outer cacheable, WB
+	orr	r0, r0, #TTB_FLAGS
 	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
 	isb
 1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
@@ -162,6 +171,11 @@ cpu_v7_name:
  *	- cache type register is implemented
  */
 __v7_setup:
+#ifdef CONFIG_SMP
+	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
+	orr	r0, r0, #(0x1 << 6)
+	mcr	p15, 0, r0, c1, c0, 1
+#endif
 	adr	r12, __v7_setup_stack		@ the local stack
 	stmia	r12, {r0-r5, r7, r9, r11, lr}
 	bl	v7_flush_dcache_all
@@ -174,8 +188,7 @@ __v7_setup:
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r10, c2, c0, 2		@ TTB control register
-	orr	r4, r4, #TTB_RGN_OC_WB		@ mark PTWs outer cacheable, WB
-	mcr	p15, 0, r4, c2, c0, 0		@ load TTB0
+	orr	r4, r4, #TTB_FLAGS
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 	mov	r10, #0x1f			@ domains 0, 1 = manager
 	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
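The TTB_FLAGS value OR'd into the translation table base selects the memory attributes used for hardware page-table walks: bit 0 (TTB_C) marks walks inner cacheable, bit 1 (TTB_S) shareable, and bits [4:3] the outer-cacheability region. Composing a TTBR0 value as cpu_v7_switch_mm now does, with a hypothetical pgd physical address:

	/* sketch: pgd_phys is a hypothetical page-table base address */
	unsigned long ttbr0 = pgd_phys | TTB_FLAGS;
	/* SMP: TTB_C | TTB_S | TTB_RGN_OC_WBWA  (shared, outer write-back/write-allocate)
	 * UP:  TTB_C | TTB_RGN_OC_WB            (outer write-back) */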
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index a62dcf7098ba..3c73aafe3e01 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -101,9 +101,12 @@ ENTRY(vfp_support_entry)
 	VFPFSTMIA r4, r5		@ save the working registers
 	VFPFMRX	r5, FPSCR		@ current status
 	tst	r1, #FPEXC_EX		@ is there additional state to save?
-	VFPFMRX	r6, FPINST, NE		@ FPINST (only if FPEXC.EX is set)
-	tstne	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
-	VFPFMRX	r8, FPINST2, NE		@ FPINST2 if needed (and present)
+	beq	1f
+	VFPFMRX	r6, FPINST		@ FPINST (only if FPEXC.EX is set)
+	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
+	beq	1f
+	VFPFMRX	r8, FPINST2		@ FPINST2 if needed (and present)
+1:
 	stmia	r4, {r1, r5, r6, r8}	@ save FPEXC, FPSCR, FPINST, FPINST2
 					@ and point r4 at the word at the
 					@ start of the register dump
@@ -117,9 +120,12 @@ no_old_VFP_process:
 					@ FPEXC is in a safe state
 	ldmia	r10, {r1, r5, r6, r8}	@ load FPEXC, FPSCR, FPINST, FPINST2
 	tst	r1, #FPEXC_EX		@ is there additional state to restore?
-	VFPFMXR	FPINST, r6, NE		@ restore FPINST (only if FPEXC.EX is set)
-	tstne	r1, #FPEXC_FP2V		@ is there an FPINST2 to write?
-	VFPFMXR	FPINST2, r8, NE		@ FPINST2 if needed (and present)
+	beq	1f
+	VFPFMXR	FPINST, r6		@ restore FPINST (only if FPEXC.EX is set)
+	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to write?
+	beq	1f
+	VFPFMXR	FPINST2, r8		@ FPINST2 if needed (and present)
+1:
 	VFPFMXR	FPSCR, r5		@ restore status
 
 check_for_exception:
@@ -175,9 +181,12 @@ ENTRY(vfp_save_state)
 	VFPFSTMIA r0, r2		@ save the working registers
 	VFPFMRX	r2, FPSCR		@ current status
 	tst	r1, #FPEXC_EX		@ is there additional state to save?
-	VFPFMRX	r3, FPINST, NE		@ FPINST (only if FPEXC.EX is set)
-	tstne	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
-	VFPFMRX	r12, FPINST2, NE	@ FPINST2 if needed (and present)
+	beq	1f
+	VFPFMRX	r3, FPINST		@ FPINST (only if FPEXC.EX is set)
+	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
+	beq	1f
+	VFPFMRX	r12, FPINST2		@ FPINST2 if needed (and present)
+1:
 	stmia	r0, {r1, r2, r3, r12}	@ save FPEXC, FPSCR, FPINST, FPINST2
 	mov	pc, lr
 ENDPROC(vfp_save_state)
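All three vfphw.S hunks have the same shape: the conditionally executed (NE) transfers become explicit branches around the FPINST/FPINST2 accesses. As a C restatement of the new save sequence (a paraphrase of the assembly, not kernel code):

	if (fpexc & FPEXC_EX) {			/* additional state to save? */
		fpinst = fmrx(FPINST);
		if (fpexc & FPEXC_FP2V)		/* FPINST2 valid and present? */
			fpinst2 = fmrx(FPINST2);
	}
	/* then store fpexc, fpscr, fpinst, fpinst2 together */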
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index c0d2c9bb952b..67ca340a7c85 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -371,6 +371,15 @@ static int __init vfp_init(void)
 		 * in place; report VFP support to userspace.
 		 */
 		elf_hwcap |= HWCAP_VFP;
+#ifdef CONFIG_NEON
+		/*
+		 * Check for the presence of the Advanced SIMD
+		 * load/store instructions, integer and single
+		 * precision floating point operations.
+		 */
+		if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
+			elf_hwcap |= HWCAP_NEON;
+#endif
 	}
 	return 0;
 }
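The MVFR1 mask isolates three 4-bit fields and requires each to read 1; matching them to the comment in the hunk, the check decomposes as:

	/* (fmrx(MVFR1) & 0x000fff00) == 0x00011100 means:
	 *   bits [11:8]  == 1  ->  Advanced SIMD load/store instructions
	 *   bits [15:12] == 1  ->  Advanced SIMD integer operations
	 *   bits [19:16] == 1  ->  Advanced SIMD single-precision FP    */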