diff options
Diffstat (limited to 'arch/arm/include')
-rw-r--r-- | arch/arm/include/asm/assembler.h | 29 | ||||
-rw-r--r-- | arch/arm/include/asm/cacheflush.h | 15 | ||||
-rw-r--r-- | arch/arm/include/asm/glue-cache.h | 1 | ||||
-rw-r--r-- | arch/arm/include/asm/opcodes-virt.h | 10 | ||||
-rw-r--r-- | arch/arm/include/asm/ptrace.h | 1 | ||||
-rw-r--r-- | arch/arm/include/asm/virt.h | 69 |
6 files changed, 125 insertions, 0 deletions
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 5c8b3bf4d82..2ef95813fce 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h | |||
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | #include <asm/ptrace.h> | 23 | #include <asm/ptrace.h> |
24 | #include <asm/domain.h> | 24 | #include <asm/domain.h> |
25 | #include <asm/opcodes-virt.h> | ||
25 | 26 | ||
26 | #define IOMEM(x) (x) | 27 | #define IOMEM(x) (x) |
27 | 28 | ||
@@ -240,6 +241,34 @@ | |||
240 | #endif | 241 | #endif |
241 | 242 | ||
242 | /* | 243 | /* |
244 | * Helper macro to enter SVC mode cleanly and mask interrupts. reg is | ||
245 | * a scratch register for the macro to overwrite. | ||
246 | * | ||
247 | * This macro is intended for forcing the CPU into SVC mode at boot time. | ||
248 | * You cannot return to the original mode. | ||
249 | * | ||
250 | * Beware, it also clobbers LR. | ||
251 | */ | ||
252 | .macro safe_svcmode_maskall reg:req | ||
253 | mrs \reg , cpsr | ||
254 | mov lr , \reg | ||
255 | and lr , lr , #MODE_MASK | ||
256 | cmp lr , #HYP_MODE | ||
257 | orr \reg , \reg , #PSR_I_BIT | PSR_F_BIT | ||
258 | bic \reg , \reg , #MODE_MASK | ||
259 | orr \reg , \reg , #SVC_MODE | ||
260 | THUMB( orr \reg , \reg , #PSR_T_BIT ) | ||
261 | bne 1f | ||
262 | orr \reg, \reg, #PSR_A_BIT | ||
263 | adr lr, BSYM(2f) | ||
264 | msr spsr_cxsf, \reg | ||
265 | __MSR_ELR_HYP(14) | ||
266 | __ERET | ||
267 | 1: msr cpsr_c, \reg | ||
268 | 2: | ||
269 | .endm | ||
270 | |||
271 | /* | ||
243 | * STRT/LDRT access macros with ARM and Thumb-2 variants | 272 | * STRT/LDRT access macros with ARM and Thumb-2 variants |
244 | */ | 273 | */ |
245 | #ifdef CONFIG_THUMB2_KERNEL | 274 | #ifdef CONFIG_THUMB2_KERNEL |
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index e4448e16046..e1489c54cd1 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h | |||
@@ -49,6 +49,13 @@ | |||
49 | * | 49 | * |
50 | * Unconditionally clean and invalidate the entire cache. | 50 | * Unconditionally clean and invalidate the entire cache. |
51 | * | 51 | * |
52 | * flush_kern_louis() | ||
53 | * | ||
54 | * Flush data cache levels up to the level of unification | ||
55 | * inner shareable and invalidate the I-cache. | ||
56 | * Only needed from v7 onwards, falls back to flush_cache_all() | ||
57 | * for all other processor versions. | ||
58 | * | ||
52 | * flush_user_all() | 59 | * flush_user_all() |
53 | * | 60 | * |
54 | * Clean and invalidate all user space cache entries | 61 | * Clean and invalidate all user space cache entries |
@@ -97,6 +104,7 @@ | |||
97 | struct cpu_cache_fns { | 104 | struct cpu_cache_fns { |
98 | void (*flush_icache_all)(void); | 105 | void (*flush_icache_all)(void); |
99 | void (*flush_kern_all)(void); | 106 | void (*flush_kern_all)(void); |
107 | void (*flush_kern_louis)(void); | ||
100 | void (*flush_user_all)(void); | 108 | void (*flush_user_all)(void); |
101 | void (*flush_user_range)(unsigned long, unsigned long, unsigned int); | 109 | void (*flush_user_range)(unsigned long, unsigned long, unsigned int); |
102 | 110 | ||
@@ -119,6 +127,7 @@ extern struct cpu_cache_fns cpu_cache; | |||
119 | 127 | ||
120 | #define __cpuc_flush_icache_all cpu_cache.flush_icache_all | 128 | #define __cpuc_flush_icache_all cpu_cache.flush_icache_all |
121 | #define __cpuc_flush_kern_all cpu_cache.flush_kern_all | 129 | #define __cpuc_flush_kern_all cpu_cache.flush_kern_all |
130 | #define __cpuc_flush_kern_louis cpu_cache.flush_kern_louis | ||
122 | #define __cpuc_flush_user_all cpu_cache.flush_user_all | 131 | #define __cpuc_flush_user_all cpu_cache.flush_user_all |
123 | #define __cpuc_flush_user_range cpu_cache.flush_user_range | 132 | #define __cpuc_flush_user_range cpu_cache.flush_user_range |
124 | #define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range | 133 | #define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range |
@@ -139,6 +148,7 @@ extern struct cpu_cache_fns cpu_cache; | |||
139 | 148 | ||
140 | extern void __cpuc_flush_icache_all(void); | 149 | extern void __cpuc_flush_icache_all(void); |
141 | extern void __cpuc_flush_kern_all(void); | 150 | extern void __cpuc_flush_kern_all(void); |
151 | extern void __cpuc_flush_kern_louis(void); | ||
142 | extern void __cpuc_flush_user_all(void); | 152 | extern void __cpuc_flush_user_all(void); |
143 | extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int); | 153 | extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int); |
144 | extern void __cpuc_coherent_kern_range(unsigned long, unsigned long); | 154 | extern void __cpuc_coherent_kern_range(unsigned long, unsigned long); |
@@ -204,6 +214,11 @@ static inline void __flush_icache_all(void) | |||
204 | __flush_icache_preferred(); | 214 | __flush_icache_preferred(); |
205 | } | 215 | } |
206 | 216 | ||
217 | /* | ||
218 | * Flush caches up to Level of Unification Inner Shareable | ||
219 | */ | ||
220 | #define flush_cache_louis() __cpuc_flush_kern_louis() | ||
221 | |||
207 | #define flush_cache_all() __cpuc_flush_kern_all() | 222 | #define flush_cache_all() __cpuc_flush_kern_all() |
208 | 223 | ||
209 | static inline void vivt_flush_cache_mm(struct mm_struct *mm) | 224 | static inline void vivt_flush_cache_mm(struct mm_struct *mm) |
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h index 4f8d2c0dc44..cca9f15704e 100644 --- a/arch/arm/include/asm/glue-cache.h +++ b/arch/arm/include/asm/glue-cache.h | |||
@@ -132,6 +132,7 @@ | |||
132 | #ifndef MULTI_CACHE | 132 | #ifndef MULTI_CACHE |
133 | #define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all) | 133 | #define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all) |
134 | #define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all) | 134 | #define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all) |
135 | #define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_louis) | ||
135 | #define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all) | 136 | #define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all) |
136 | #define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) | 137 | #define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) |
137 | #define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range) | 138 | #define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range) |
diff --git a/arch/arm/include/asm/opcodes-virt.h b/arch/arm/include/asm/opcodes-virt.h index b85665a96f8..efcfdf92d9d 100644 --- a/arch/arm/include/asm/opcodes-virt.h +++ b/arch/arm/include/asm/opcodes-virt.h | |||
@@ -26,4 +26,14 @@ | |||
26 | 0xF7E08000 | (((imm16) & 0xF000) << 4) | ((imm16) & 0x0FFF) \ | 26 | 0xF7E08000 | (((imm16) & 0xF000) << 4) | ((imm16) & 0x0FFF) \ |
27 | ) | 27 | ) |
28 | 28 | ||
29 | #define __ERET __inst_arm_thumb32( \ | ||
30 | 0xE160006E, \ | ||
31 | 0xF3DE8F00 \ | ||
32 | ) | ||
33 | |||
34 | #define __MSR_ELR_HYP(regnum) __inst_arm_thumb32( \ | ||
35 | 0xE12EF300 | regnum, \ | ||
36 | 0xF3808E30 | (regnum << 16) \ | ||
37 | ) | ||
38 | |||
29 | #endif /* ! __ASM_ARM_OPCODES_VIRT_H */ | 39 | #endif /* ! __ASM_ARM_OPCODES_VIRT_H */ |
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 355ece523f4..91ef6c231c4 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h | |||
@@ -44,6 +44,7 @@ | |||
44 | #define IRQ_MODE 0x00000012 | 44 | #define IRQ_MODE 0x00000012 |
45 | #define SVC_MODE 0x00000013 | 45 | #define SVC_MODE 0x00000013 |
46 | #define ABT_MODE 0x00000017 | 46 | #define ABT_MODE 0x00000017 |
47 | #define HYP_MODE 0x0000001a | ||
47 | #define UND_MODE 0x0000001b | 48 | #define UND_MODE 0x0000001b |
48 | #define SYSTEM_MODE 0x0000001f | 49 | #define SYSTEM_MODE 0x0000001f |
49 | #define MODE32_BIT 0x00000010 | 50 | #define MODE32_BIT 0x00000010 |
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h new file mode 100644 index 00000000000..86164df86cb --- /dev/null +++ b/arch/arm/include/asm/virt.h | |||
@@ -0,0 +1,69 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2012 Linaro Limited. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along | ||
15 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | */ | ||
18 | |||
19 | #ifndef VIRT_H | ||
20 | #define VIRT_H | ||
21 | |||
22 | #include <asm/ptrace.h> | ||
23 | |||
24 | /* | ||
25 | * Flag indicating that the kernel was not entered in the same mode on every | ||
26 | * CPU. The zImage loader stashes this value in an SPSR, so we need an | ||
27 | * architecturally defined flag bit here (the N flag, as it happens) | ||
28 | */ | ||
29 | #define BOOT_CPU_MODE_MISMATCH (1<<31) | ||
30 | |||
31 | #ifndef __ASSEMBLY__ | ||
32 | |||
33 | #ifdef CONFIG_ARM_VIRT_EXT | ||
34 | /* | ||
35 | * __boot_cpu_mode records what mode the primary CPU was booted in. | ||
36 | * A correctly-implemented bootloader must start all CPUs in the same mode: | ||
37 | * if it fails to do this, the flag BOOT_CPU_MODE_MISMATCH is set to indicate | ||
38 | * that some CPU(s) were booted in a different mode. | ||
39 | * | ||
40 | * This allows the kernel to flag an error when the secondaries have come up. | ||
41 | */ | ||
42 | extern int __boot_cpu_mode; | ||
43 | |||
44 | void __hyp_set_vectors(unsigned long phys_vector_base); | ||
45 | unsigned long __hyp_get_vectors(void); | ||
46 | #else | ||
47 | #define __boot_cpu_mode (SVC_MODE) | ||
48 | #endif | ||
49 | |||
50 | #ifndef ZIMAGE | ||
51 | void hyp_mode_check(void); | ||
52 | |||
53 | /* Reports the availability of HYP mode */ | ||
54 | static inline bool is_hyp_mode_available(void) | ||
55 | { | ||
56 | return ((__boot_cpu_mode & MODE_MASK) == HYP_MODE && | ||
57 | !(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH)); | ||
58 | } | ||
59 | |||
60 | /* Check if the bootloader has booted CPUs in different modes */ | ||
61 | static inline bool is_hyp_mode_mismatched(void) | ||
62 | { | ||
63 | return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH); | ||
64 | } | ||
65 | #endif | ||
66 | |||
67 | #endif /* __ASSEMBLY__ */ | ||
68 | |||
69 | #endif /* ! VIRT_H */ | ||