author:    Russell King <rmk+kernel@arm.linux.org.uk>  2012-09-30 04:03:44 -0400
committer: Russell King <rmk+kernel@arm.linux.org.uk>  2012-09-30 04:03:44 -0400
commit:    648f3b69986b4d0ade57e59504a431b973ce2875
tree:      6d0b6ccc6f243c1c582d52b88ab917eaffa022ba /arch/arm
parent:    8ee777fd915b0e36f35a430225729007a1df6441
parent:    8ec58be9f3ff2ad4a4d7bde8f48b4a4c406768e7

Merge branch 'hyp-boot-mode-rmk' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into devel-stable
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/boot/compressed/.gitignore    1
-rw-r--r--  arch/arm/boot/compressed/Makefile      9
-rw-r--r--  arch/arm/boot/compressed/head.S       71
-rw-r--r--  arch/arm/include/asm/assembler.h      28
-rw-r--r--  arch/arm/include/asm/opcodes-virt.h   39
-rw-r--r--  arch/arm/include/asm/opcodes.h       181
-rw-r--r--  arch/arm/include/asm/ptrace.h          1
-rw-r--r--  arch/arm/include/asm/virt.h           69
-rw-r--r--  arch/arm/kernel/Makefile               2
-rw-r--r--  arch/arm/kernel/head.S                14
-rw-r--r--  arch/arm/kernel/hyp-stub.S           223
-rw-r--r--  arch/arm/kernel/setup.c               20
-rw-r--r--  arch/arm/kernel/smp.c                  3
-rw-r--r--  arch/arm/mm/Kconfig                   17
14 files changed, 652 insertions, 26 deletions
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore
index d0d441c429ae..f79a08efe000 100644
--- a/arch/arm/boot/compressed/.gitignore
+++ b/arch/arm/boot/compressed/.gitignore
@@ -1,6 +1,7 @@
 ashldi3.S
 font.c
 lib1funcs.S
+hyp-stub.S
 piggy.gzip
 piggy.lzo
 piggy.lzma
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index bb267562e7ed..a517153a13ea 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -30,6 +30,10 @@ FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c
 OBJS		+= string.o
 CFLAGS_string.o	:= -Os
 
+ifeq ($(CONFIG_ARM_VIRT_EXT),y)
+OBJS		+= hyp-stub.o
+endif
+
 #
 # Architecture dependencies
 #
@@ -126,7 +130,7 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
 
 ccflags-y := -fpic -fno-builtin -I$(obj)
-asflags-y := -Wa,-march=all
+asflags-y := -Wa,-march=all -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
 KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \
@@ -198,3 +202,6 @@ $(obj)/font.c: $(FONTC)
 
 $(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
 	@sed "$(SEDFLAGS)" < $< > $@
+
+$(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S
+	$(call cmd,shipped)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index bc67cbff3944..90275f036cd1 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -9,6 +9,7 @@
  * published by the Free Software Foundation.
  */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 /*
  * Debugging stuff
@@ -132,7 +133,12 @@ start:
 		.word	start			@ absolute load/run zImage address
 		.word	_edata			@ zImage end address
 THUMB(		.thumb			)
-1:		mov	r7, r1			@ save architecture ID
+1:
+		mrs	r9, cpsr
+#ifdef CONFIG_ARM_VIRT_EXT
+		bl	__hyp_stub_install	@ get into SVC mode, reversibly
+#endif
+		mov	r7, r1			@ save architecture ID
 		mov	r8, r2			@ save atags pointer
 
 #ifndef __ARM_ARCH_2__
@@ -148,9 +154,9 @@ start:
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
 not_angel:
-		mrs	r2, cpsr		@ turn off interrupts to
-		orr	r2, r2, #0xc0		@ prevent angel from running
-		msr	cpsr_c, r2
+		safe_svcmode_maskall r0
+		msr	spsr_cxsf, r9		@ Save the CPU boot mode in
+						@ SPSR
 #else
 		teqp	pc, #0x0c000003		@ turn off interrupts
 #endif
@@ -350,6 +356,20 @@ dtb_check_done:
 		adr	r5, restart
 		bic	r5, r5, #31
 
+/* Relocate the hyp vector base if necessary */
+#ifdef CONFIG_ARM_VIRT_EXT
+		mrs	r0, spsr
+		and	r0, r0, #MODE_MASK
+		cmp	r0, #HYP_MODE
+		bne	1f
+
+		bl	__hyp_get_vectors
+		sub	r0, r0, r5
+		add	r0, r0, r10
+		bl	__hyp_set_vectors
+1:
+#endif
+
 		sub	r9, r6, r5		@ size to copy
 		add	r9, r9, #31		@ rounded up to a multiple
 		bic	r9, r9, #31		@ ... of 32 bytes
@@ -458,11 +478,29 @@ not_relocated: mov r0, #0
 		bl	decompress_kernel
 		bl	cache_clean_flush
 		bl	cache_off
-		mov	r0, #0			@ must be zero
 		mov	r1, r7			@ restore architecture number
 		mov	r2, r8			@ restore atags pointer
-ARM(		mov	pc, r4	)		@ call kernel
-THUMB(		bx	r4	)		@ entry point is always ARM
+
+#ifdef CONFIG_ARM_VIRT_EXT
+		mrs	r0, spsr		@ Get saved CPU boot mode
+		and	r0, r0, #MODE_MASK
+		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
+		bne	__enter_kernel		@ boot kernel directly
+
+		adr	r12, .L__hyp_reentry_vectors_offset
+		ldr	r0, [r12]
+		add	r0, r0, r12
+
+		bl	__hyp_set_vectors
+		__HVC(0)			@ otherwise bounce to hyp mode
+
+		b	.			@ should never be reached
+
+		.align	2
+.L__hyp_reentry_vectors_offset:	.long __hyp_reentry_vectors - .
+#else
+		b	__enter_kernel
+#endif
 
 		.align	2
 		.type	LC0, #object
@@ -1196,6 +1234,25 @@ memdump: mov r12, r0
 #endif
 
 		.ltorg
+
+#ifdef CONFIG_ARM_VIRT_EXT
+.align 5
+__hyp_reentry_vectors:
+		W(b)	.			@ reset
+		W(b)	.			@ undef
+		W(b)	.			@ svc
+		W(b)	.			@ pabort
+		W(b)	.			@ dabort
+		W(b)	__enter_kernel		@ hyp
+		W(b)	.			@ irq
+		W(b)	.			@ fiq
+#endif /* CONFIG_ARM_VIRT_EXT */
+
+__enter_kernel:
+		mov	r0, #0			@ must be 0
+ARM(		mov	pc, r4	)		@ call kernel
+THUMB(		bx	r4	)		@ entry point is always ARM
+
 reloc_code_end:
 
 		.align
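For readers tracing the relocation fix-up above: the stub's vector table lives inside the zImage, so when the decompressor copies itself by (r10 - r5) bytes, HVBAR has to be moved by the same delta before the copy runs. A minimal C model of that arithmetic, with made-up addresses (nothing here comes from a real boot):

#include <stdint.h>
#include <stdio.h>

/* Model of the hyp vector rebase in compressed/head.S: the new HVBAR is
 * old_vectors - old_image_base (r5) + relocation_target (r10). */
int main(void)
{
	uint32_t r5  = 0x80008000;	/* current start of the image  */
	uint32_t r10 = 0x80400000;	/* relocation destination      */
	uint32_t r0  = 0x80008200;	/* __hyp_get_vectors() result  */

	r0 = r0 - r5 + r10;	/* sub r0, r0, r5; add r0, r0, r10 */
	printf("rebased HVBAR: 0x%08X\n", r0);	/* 0x80400200 */
	return 0;
}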
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 5c8b3bf4d825..683a1e6b6020 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -22,6 +22,7 @@
 
 #include <asm/ptrace.h>
 #include <asm/domain.h>
+#include <asm/opcodes-virt.h>
 
 #define IOMEM(x)	(x)
 
@@ -240,6 +241,33 @@
 #endif
 
 /*
+ * Helper macro to enter SVC mode cleanly and mask interrupts.  reg is
+ * a scratch register for the macro to overwrite.
+ *
+ * This macro is intended for forcing the CPU into SVC mode at boot time.
+ * You cannot return to the original mode.
+ *
+ * Beware: it also clobbers LR.
+ */
+.macro safe_svcmode_maskall reg:req
+	mrs	\reg , cpsr
+	mov	lr , \reg
+	and	lr , lr , #MODE_MASK
+	cmp	lr , #HYP_MODE
+	orr	\reg , \reg , #PSR_A_BIT | PSR_I_BIT | PSR_F_BIT
+	bic	\reg , \reg , #MODE_MASK
+	orr	\reg , \reg , #SVC_MODE
+THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
+	msr	spsr_cxsf, \reg
+	adr	lr, BSYM(2f)
+	bne	1f
+	__MSR_ELR_HYP(14)
+	__ERET
+1:	movs	pc, lr
+2:
+.endm
+
+/*
  * STRT/LDRT access macros with ARM and Thumb-2 variants
  */
 #ifdef CONFIG_THUMB2_KERNEL
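Viewed as a pure function on the CPSR value, safe_svcmode_maskall masks all asynchronous exceptions and forces the mode field to SVC (taking the ELR_hyp/ERET exit path when the CPU was in HYP). A small C model of the PSR rewrite, assuming the usual asm/ptrace.h constant values; the helper name is ours, not kernel API:

#include <stdint.h>
#include <stdio.h>

/* Constants as in arch/arm/include/asm/ptrace.h */
#define MODE_MASK	0x0000001f
#define SVC_MODE	0x00000013
#define HYP_MODE	0x0000001a
#define PSR_F_BIT	0x00000040
#define PSR_I_BIT	0x00000080
#define PSR_A_BIT	0x00000100

/* The PSR value the macro programs before switching modes */
static uint32_t svcmode_maskall_psr(uint32_t cpsr)
{
	uint32_t psr = cpsr;

	psr |= PSR_A_BIT | PSR_I_BIT | PSR_F_BIT;	/* mask A/I/F */
	psr &= ~MODE_MASK;				/* clear mode field */
	psr |= SVC_MODE;				/* force SVC mode */
	return psr;
}

int main(void)
{
	uint32_t cpsr = HYP_MODE;	/* e.g. entered in HYP, nothing masked */

	printf("0x%08x -> 0x%08x\n", cpsr, svcmode_maskall_psr(cpsr));
	/* prints 0x0000001a -> 0x000001d3 */
	return 0;
}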
diff --git a/arch/arm/include/asm/opcodes-virt.h b/arch/arm/include/asm/opcodes-virt.h
new file mode 100644
index 000000000000..efcfdf92d9d5
--- /dev/null
+++ b/arch/arm/include/asm/opcodes-virt.h
@@ -0,0 +1,39 @@
+/*
+ * opcodes-virt.h: Opcode definitions for the ARM virtualization extensions
+ * Copyright (C) 2012 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __ASM_ARM_OPCODES_VIRT_H
+#define __ASM_ARM_OPCODES_VIRT_H
+
+#include <asm/opcodes.h>
+
+#define __HVC(imm16) __inst_arm_thumb32(				\
+	0xE1400070 | (((imm16) & 0xFFF0) << 4) | ((imm16) & 0x000F),	\
+	0xF7E08000 | (((imm16) & 0xF000) << 4) | ((imm16) & 0x0FFF)	\
+)
+
+#define __ERET	__inst_arm_thumb32(					\
+	0xE160006E,							\
+	0xF3DE8F00							\
+)
+
+#define __MSR_ELR_HYP(regnum)	__inst_arm_thumb32(			\
+	0xE12EF300 | regnum,						\
+	0xF3808E30 | (regnum << 16)					\
+)
+
+#endif /* ! __ASM_ARM_OPCODES_VIRT_H */
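Note how __HVC() splits its 16-bit immediate differently per instruction set: the ARM encoding carries imm12 in bits 19:8 and imm4 in bits 3:0, while the Thumb-2 encoding carries imm4 in bits 19:16 and imm12 in the low bits of the second halfword. A C restatement of the split (the helper functions are ours, for experimentation only):

#include <stdint.h>
#include <stdio.h>

/* Same field packing as the __HVC() macro above */
static uint32_t hvc_arm(uint16_t imm16)
{
	return 0xE1400070u | (((uint32_t)imm16 & 0xFFF0) << 4)
			   | ((uint32_t)imm16 & 0x000F);
}

static uint32_t hvc_thumb(uint16_t imm16)
{
	return 0xF7E08000u | (((uint32_t)imm16 & 0xF000) << 4)
			   | ((uint32_t)imm16 & 0x0FFF);
}

int main(void)
{
	/* HVC #0, as used by the hyp stub's __HVC(0) */
	printf("ARM:     0x%08X\n", hvc_arm(0));	/* 0xE1400070 */
	printf("Thumb-2: 0x%08X\n", hvc_thumb(0));	/* 0xF7E08000 */
	return 0;
}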
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
index 19c48deda70f..74e211a6fb24 100644
--- a/arch/arm/include/asm/opcodes.h
+++ b/arch/arm/include/asm/opcodes.h
@@ -19,6 +19,33 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
 
 
 /*
+ * Assembler opcode byteswap helpers.
+ * These are only intended for use by this header: don't use them directly,
+ * because they will be suboptimal in most cases.
+ */
+#define ___asm_opcode_swab32(x) (	\
+	  (((x) << 24) & 0xFF000000)	\
+	| (((x) <<  8) & 0x00FF0000)	\
+	| (((x) >>  8) & 0x0000FF00)	\
+	| (((x) >> 24) & 0x000000FF)	\
+)
+#define ___asm_opcode_swab16(x) (	\
+	  (((x) << 8) & 0xFF00)		\
+	| (((x) >> 8) & 0x00FF)		\
+)
+#define ___asm_opcode_swahb32(x) (	\
+	  (((x) << 8) & 0xFF00FF00)	\
+	| (((x) >> 8) & 0x00FF00FF)	\
+)
+#define ___asm_opcode_swahw32(x) (	\
+	  (((x) << 16) & 0xFFFF0000)	\
+	| (((x) >> 16) & 0x0000FFFF)	\
+)
+#define ___asm_opcode_identity32(x) ((x) & 0xFFFFFFFF)
+#define ___asm_opcode_identity16(x) ((x) & 0xFFFF)
+
+
+/*
  * Opcode byteswap helpers
  *
  * These macros help with converting instructions between a canonical integer
@@ -41,39 +68,163 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
  * Note that values in the range 0x0000E800..0xE7FFFFFF intentionally do not
  * represent any valid Thumb-2 instruction. For this range,
  * __opcode_is_thumb32() and __opcode_is_thumb16() will both be false.
+ *
+ * The ___asm variants are intended only for use by this header, in situations
+ * involving inline assembler.  For .S files, the normal __opcode_*() macros
+ * should do the right thing.
  */
+#ifdef __ASSEMBLY__
 
-#ifndef __ASSEMBLY__
+#define ___opcode_swab32(x) ___asm_opcode_swab32(x)
+#define ___opcode_swab16(x) ___asm_opcode_swab16(x)
+#define ___opcode_swahb32(x) ___asm_opcode_swahb32(x)
+#define ___opcode_swahw32(x) ___asm_opcode_swahw32(x)
+#define ___opcode_identity32(x) ___asm_opcode_identity32(x)
+#define ___opcode_identity16(x) ___asm_opcode_identity16(x)
+
+#else /* ! __ASSEMBLY__ */
 
 #include <linux/types.h>
 #include <linux/swab.h>
 
+#define ___opcode_swab32(x) swab32(x)
+#define ___opcode_swab16(x) swab16(x)
+#define ___opcode_swahb32(x) swahb32(x)
+#define ___opcode_swahw32(x) swahw32(x)
+#define ___opcode_identity32(x) ((u32)(x))
+#define ___opcode_identity16(x) ((u16)(x))
+
+#endif /* ! __ASSEMBLY__ */
+
+
 #ifdef CONFIG_CPU_ENDIAN_BE8
-#define __opcode_to_mem_arm(x) swab32(x)
-#define __opcode_to_mem_thumb16(x) swab16(x)
-#define __opcode_to_mem_thumb32(x) swahb32(x)
-#else
-#define __opcode_to_mem_arm(x) ((u32)(x))
-#define __opcode_to_mem_thumb16(x) ((u16)(x))
-#define __opcode_to_mem_thumb32(x) swahw32(x)
+
+#define __opcode_to_mem_arm(x) ___opcode_swab32(x)
+#define __opcode_to_mem_thumb16(x) ___opcode_swab16(x)
+#define __opcode_to_mem_thumb32(x) ___opcode_swahb32(x)
+#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_swab32(x)
+#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_swab16(x)
+#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahb32(x)
+
+#else /* ! CONFIG_CPU_ENDIAN_BE8 */
+
+#define __opcode_to_mem_arm(x) ___opcode_identity32(x)
+#define __opcode_to_mem_thumb16(x) ___opcode_identity16(x)
+#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_identity32(x)
+#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_identity16(x)
+#ifndef CONFIG_CPU_ENDIAN_BE32
+/*
+ * On BE32 systems, using 32-bit accesses to store Thumb instructions will not
+ * work in all cases, due to alignment constraints.  For now, a correct
+ * version is not provided for BE32.
+ */
+#define __opcode_to_mem_thumb32(x) ___opcode_swahw32(x)
+#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahw32(x)
 #endif
 
+#endif /* ! CONFIG_CPU_ENDIAN_BE8 */
+
 #define __mem_to_opcode_arm(x) __opcode_to_mem_arm(x)
 #define __mem_to_opcode_thumb16(x) __opcode_to_mem_thumb16(x)
+#ifndef CONFIG_CPU_ENDIAN_BE32
 #define __mem_to_opcode_thumb32(x) __opcode_to_mem_thumb32(x)
+#endif
 
 /* Operations specific to Thumb opcodes */
 
 /* Instruction size checks: */
-#define __opcode_is_thumb32(x) ((u32)(x) >= 0xE8000000UL)
-#define __opcode_is_thumb16(x) ((u32)(x) < 0xE800UL)
+#define __opcode_is_thumb32(x) (		\
+	   ((x) & 0xF8000000) == 0xE8000000	\
+	|| ((x) & 0xF0000000) == 0xF0000000	\
+)
+#define __opcode_is_thumb16(x) (					\
+	   ((x) & 0xFFFF0000) == 0					\
+	&& !(((x) & 0xF800) == 0xE800 || ((x) & 0xF000) == 0xF000)	\
+)
 
 /* Operations to construct or split 32-bit Thumb instructions: */
-#define __opcode_thumb32_first(x) ((u16)((x) >> 16))
-#define __opcode_thumb32_second(x) ((u16)(x))
-#define __opcode_thumb32_compose(first, second) \
-	(((u32)(u16)(first) << 16) | (u32)(u16)(second))
+#define __opcode_thumb32_first(x) (___opcode_identity16((x) >> 16))
+#define __opcode_thumb32_second(x) (___opcode_identity16(x))
+#define __opcode_thumb32_compose(first, second) (			\
+	  (___opcode_identity32(___opcode_identity16(first)) << 16)	\
+	| ___opcode_identity32(___opcode_identity16(second))		\
+)
+#define ___asm_opcode_thumb32_first(x) (___asm_opcode_identity16((x) >> 16))
+#define ___asm_opcode_thumb32_second(x) (___asm_opcode_identity16(x))
+#define ___asm_opcode_thumb32_compose(first, second) (			     \
+	  (___asm_opcode_identity32(___asm_opcode_identity16(first)) << 16) \
+	| ___asm_opcode_identity32(___asm_opcode_identity16(second))	     \
+)
 
-#endif /* __ASSEMBLY__ */
+/*
+ * Opcode injection helpers
+ *
+ * In rare cases it is necessary to assemble an opcode which the
+ * assembler does not support directly, or which would normally be
+ * rejected because of the CFLAGS or AFLAGS used to build the affected
+ * file.
+ *
+ * Before using these macros, consider carefully whether it is feasible
+ * instead to change the build flags for your file, or whether it really
+ * makes sense to support old assembler versions when building that
+ * particular kernel feature.
+ *
+ * The macros defined here should only be used where there is no viable
+ * alternative.
+ *
+ *
+ * __inst_arm(x): emit the specified ARM opcode
+ * __inst_thumb16(x): emit the specified 16-bit Thumb opcode
+ * __inst_thumb32(x): emit the specified 32-bit Thumb opcode
+ *
+ * __inst_arm_thumb16(arm, thumb): emit either the specified arm or
+ *	16-bit Thumb opcode, depending on whether an ARM or Thumb-2
+ *	kernel is being built
+ *
+ * __inst_arm_thumb32(arm, thumb): emit either the specified arm or
+ *	32-bit Thumb opcode, depending on whether an ARM or Thumb-2
+ *	kernel is being built
+ *
+ *
+ * Note that using these macros directly is poor practice.  Instead, you
+ * should use them to define human-readable wrapper macros to encode the
+ * instructions that you care about.  In code which might run on ARMv7 or
+ * above, you can usually use the __inst_arm_thumb{16,32} macros to
+ * specify the ARM and Thumb alternatives at the same time.  This ensures
+ * that the correct opcode gets emitted depending on the instruction set
+ * used for the kernel build.
+ *
+ * Look at opcodes-virt.h for an example of how to use these macros.
+ */
+#include <linux/stringify.h>
+
+#define __inst_arm(x) ___inst_arm(___asm_opcode_to_mem_arm(x))
+#define __inst_thumb32(x) ___inst_thumb32(				\
+	___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_first(x)),	\
+	___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_second(x))	\
+)
+#define __inst_thumb16(x) ___inst_thumb16(___asm_opcode_to_mem_thumb16(x))
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define __inst_arm_thumb16(arm_opcode, thumb_opcode) \
+	__inst_thumb16(thumb_opcode)
+#define __inst_arm_thumb32(arm_opcode, thumb_opcode) \
+	__inst_thumb32(thumb_opcode)
+#else
+#define __inst_arm_thumb16(arm_opcode, thumb_opcode) __inst_arm(arm_opcode)
+#define __inst_arm_thumb32(arm_opcode, thumb_opcode) __inst_arm(arm_opcode)
+#endif
+
+/* Helpers for the helpers.  Don't use these directly. */
+#ifdef __ASSEMBLY__
+#define ___inst_arm(x) .long x
+#define ___inst_thumb16(x) .short x
+#define ___inst_thumb32(first, second) .short first, second
+#else
+#define ___inst_arm(x) ".long " __stringify(x) "\n\t"
+#define ___inst_thumb16(x) ".short " __stringify(x) "\n\t"
+#define ___inst_thumb32(first, second) \
+	".short " __stringify(first) ", " __stringify(second) "\n\t"
+#endif
 
 #endif /* __ASM_ARM_OPCODES_H */
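To see the new size checks behave, here is a self-contained C restatement of the Thumb classification and compose helpers (function names are ours; the kernel macros above are the authoritative form):

#include <stdint.h>
#include <stdio.h>

/* C versions of __opcode_is_thumb32/16 and __opcode_thumb32_compose */
static int opcode_is_thumb32(uint32_t x)
{
	return (x & 0xF8000000) == 0xE8000000 ||
	       (x & 0xF0000000) == 0xF0000000;
}

static int opcode_is_thumb16(uint32_t x)
{
	return (x & 0xFFFF0000) == 0 &&
	       !((x & 0xF800) == 0xE800 || (x & 0xF000) == 0xF000);
}

static uint32_t thumb32_compose(uint16_t first, uint16_t second)
{
	return ((uint32_t)first << 16) | second;
}

int main(void)
{
	uint32_t hvc0 = thumb32_compose(0xF7E0, 0x8000);	/* Thumb-2 HVC #0 */
	uint32_t bxlr = 0x4770;					/* Thumb "bx lr" */

	printf("0x%08X: is_thumb32=%d is_thumb16=%d\n",
	       hvc0, opcode_is_thumb32(hvc0), opcode_is_thumb16(hvc0));
	printf("0x%08X: is_thumb32=%d is_thumb16=%d\n",
	       bxlr, opcode_is_thumb32(bxlr), opcode_is_thumb16(bxlr));
	/* values in 0x0000E800..0xE7FFFFFF report false for both, by design */
	return 0;
}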
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 355ece523f41..91ef6c231c47 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -44,6 +44,7 @@
 #define IRQ_MODE	0x00000012
 #define SVC_MODE	0x00000013
 #define ABT_MODE	0x00000017
+#define HYP_MODE	0x0000001a
 #define UND_MODE	0x0000001b
 #define SYSTEM_MODE	0x0000001f
 #define MODE32_BIT	0x00000010
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
new file mode 100644
index 000000000000..86164df86cb4
--- /dev/null
+++ b/arch/arm/include/asm/virt.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2012 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef VIRT_H
+#define VIRT_H
+
+#include <asm/ptrace.h>
+
+/*
+ * Flag indicating that the kernel was not entered in the same mode on every
+ * CPU.  The zImage loader stashes this value in an SPSR, so we need an
+ * architecturally defined flag bit here (the N flag, as it happens)
+ */
+#define BOOT_CPU_MODE_MISMATCH	(1<<31)
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARM_VIRT_EXT
+/*
+ * __boot_cpu_mode records what mode the primary CPU was booted in.
+ * A correctly-implemented bootloader must start all CPUs in the same mode:
+ * if it fails to do this, the flag BOOT_CPU_MODE_MISMATCH is set to indicate
+ * that some CPU(s) were booted in a different mode.
+ *
+ * This allows the kernel to flag an error when the secondaries have come up.
+ */
+extern int __boot_cpu_mode;
+
+void __hyp_set_vectors(unsigned long phys_vector_base);
+unsigned long __hyp_get_vectors(void);
+#else
+#define __boot_cpu_mode	(SVC_MODE)
+#endif
+
+#ifndef ZIMAGE
+void hyp_mode_check(void);
+
+/* Reports the availability of HYP mode */
+static inline bool is_hyp_mode_available(void)
+{
+	return ((__boot_cpu_mode & MODE_MASK) == HYP_MODE &&
+		!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH));
+}
+
+/* Check if the bootloader has booted CPUs in different modes */
+static inline bool is_hyp_mode_mismatched(void)
+{
+	return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
+}
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* ! VIRT_H */
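The two inline checks reduce to simple tests on __boot_cpu_mode: HYP is "available" only if the primary booted in HYP and no secondary disagreed. A standalone C model of that logic (the stand-in variable and main() are ours):

#include <stdbool.h>
#include <stdio.h>

#define MODE_MASK		0x0000001fu
#define HYP_MODE		0x0000001au
#define BOOT_CPU_MODE_MISMATCH	(1u << 31)

static unsigned int boot_cpu_mode;	/* stand-in for __boot_cpu_mode */

static bool hyp_mode_available(void)
{
	return (boot_cpu_mode & MODE_MASK) == HYP_MODE &&
	       !(boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
}

int main(void)
{
	boot_cpu_mode = HYP_MODE;		/* every CPU entered in HYP */
	printf("%d\n", hyp_mode_available());	/* 1 */

	boot_cpu_mode |= BOOT_CPU_MODE_MISMATCH;	/* a secondary differed */
	printf("%d\n", hyp_mode_available());	/* 0 */
	return 0;
}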
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 7ad2d5cf7008..49b61a3f5b29 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -82,4 +82,6 @@ head-y := head$(MMUEXT).o
 obj-$(CONFIG_DEBUG_LL)		+= debug.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 
+obj-$(CONFIG_ARM_VIRT_EXT)	+= hyp-stub.o
+
 extra-y := $(head-y) vmlinux.lds
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 3db960e20cb8..27093e4feef8 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -83,8 +83,12 @@ ENTRY(stext)
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
 
-	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
-						@ and irqs disabled
+#ifdef CONFIG_ARM_VIRT_EXT
+	bl	__hyp_stub_install
+#endif
+	@ ensure svc mode and all interrupts masked
+	safe_svcmode_maskall r9
+
 	mrc	p15, 0, r9, c0, c0		@ get processor id
 	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
 	movs	r10, r5				@ invalid processor (r5=0)?
@@ -326,7 +330,11 @@ ENTRY(secondary_startup)
  * the processor type - there is no need to check the machine type
  * as it has already been validated by the primary processor.
  */
-	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+#ifdef CONFIG_ARM_VIRT_EXT
+	bl	__hyp_stub_install
+#endif
+	safe_svcmode_maskall r9
+
 	mrc	p15, 0, r9, c0, c0		@ get processor id
 	bl	__lookup_processor_type
 	movs	r10, r5				@ invalid processor?
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
new file mode 100644
index 000000000000..65b2417aebce
--- /dev/null
+++ b/arch/arm/kernel/hyp-stub.S
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2012 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/virt.h>
+
+#ifndef ZIMAGE
+/*
+ * For the kernel proper, we need to find out the CPU boot mode long after
+ * boot, so we need to store it in a writable variable.
+ *
+ * This is not in .bss, because we set it sufficiently early that the boot-time
+ * zeroing of .bss would clobber it.
+ */
+.data
+ENTRY(__boot_cpu_mode)
+	.long	0
+.text
+
+	/*
+	 * Save the primary CPU boot mode.  Requires 3 scratch registers.
+	 */
+	.macro	store_primary_cpu_mode	reg1, reg2, reg3
+	mrs	\reg1, cpsr
+	and	\reg1, \reg1, #MODE_MASK
+	adr	\reg2, .L__boot_cpu_mode_offset
+	ldr	\reg3, [\reg2]
+	str	\reg1, [\reg2, \reg3]
+	.endm
+
+	/*
+	 * Compare the current mode with the one saved on the primary CPU.
+	 * If they don't match, record that fact.  The Z bit indicates
+	 * if there's a match or not.
+	 * Requires 3 additional scratch registers.
+	 */
+	.macro	compare_cpu_mode_with_primary mode, reg1, reg2, reg3
+	adr	\reg2, .L__boot_cpu_mode_offset
+	ldr	\reg3, [\reg2]
+	ldr	\reg1, [\reg2, \reg3]
+	cmp	\mode, \reg1		@ matches primary CPU boot mode?
+	orrne	r7, r7, #BOOT_CPU_MODE_MISMATCH
+	strne	r7, [r5, r6]		@ record what happened and give up
+	.endm
+
+#else	/* ZIMAGE */
+
+	.macro	store_primary_cpu_mode	reg1:req, reg2:req, reg3:req
+	.endm
+
+/*
+ * The zImage loader only runs on one CPU, so we don't bother with multi-CPU
+ * consistency checking:
+ */
+	.macro	compare_cpu_mode_with_primary mode, reg1, reg2, reg3
+	cmp	\mode, \mode
+	.endm
+
+#endif /* ZIMAGE */
+
+/*
+ * Hypervisor stub installation functions.
+ *
+ * These must be called with the MMU and D-cache off.
+ * They are not ABI compliant and are only intended to be called from the
+ * kernel entry points in head.S.
+ */
+@ Call this from the primary CPU
+ENTRY(__hyp_stub_install)
+	store_primary_cpu_mode	r4, r5, r6
+ENDPROC(__hyp_stub_install)
+
+	@ fall through...
+
+@ Secondary CPUs should call here
+ENTRY(__hyp_stub_install_secondary)
+	mrs	r4, cpsr
+	and	r4, r4, #MODE_MASK
+
+	/*
+	 * If the secondary has booted with a different mode, give up
+	 * immediately.
+	 */
+	compare_cpu_mode_with_primary	r4, r5, r6, r7
+	bxne	lr
+
+	/*
+	 * Once we have given up on one CPU, we do not try to install the
+	 * stub hypervisor on the remaining ones: because the saved boot mode
+	 * is modified, it can't compare equal to the CPSR mode field any
+	 * more.
+	 *
+	 * Otherwise...
+	 */
+
+	cmp	r4, #HYP_MODE
+	bxne	lr			@ give up if the CPU is not in HYP mode
+
+/*
+ * Configure HSCTLR to set correct exception endianness/instruction set
+ * state etc.
+ * Turn off all traps
+ * Eventually, CPU-specific code might be needed -- assume not for now
+ *
+ * This code relies on the "eret" instruction to synchronize the
+ * various coprocessor accesses.
+ */
+	@ Now install the hypervisor stub:
+	adr	r7, __hyp_stub_vectors
+	mcr	p15, 4, r7, c12, c0, 0	@ set hypervisor vector base (HVBAR)
+
+	@ Disable all traps, so we don't get any nasty surprise
+	mov	r7, #0
+	mcr	p15, 4, r7, c1, c1, 0	@ HCR
+	mcr	p15, 4, r7, c1, c1, 2	@ HCPTR
+	mcr	p15, 4, r7, c1, c1, 3	@ HSTR
+
+THUMB(	orr	r7, #(1 << 30)	)	@ HSCTLR.TE
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	orr	r7, #(1 << 9)		@ HSCTLR.EE
+#endif
+	mcr	p15, 4, r7, c1, c0, 0	@ HSCTLR
+
+	mrc	p15, 4, r7, c1, c1, 1	@ HDCR
+	and	r7, #0x1f		@ Preserve HPMN
+	mcr	p15, 4, r7, c1, c1, 1	@ HDCR
+
+#if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
+	@ make CNTP_* and CNTPCT accessible from PL1
+	mrc	p15, 0, r7, c0, c1, 1	@ ID_PFR1
+	lsr	r7, #16
+	and	r7, #0xf
+	cmp	r7, #1
+	bne	1f
+	mrc	p15, 4, r7, c14, c1, 0	@ CNTHCTL
+	orr	r7, r7, #3		@ PL1PCEN | PL1PCTEN
+	mcr	p15, 4, r7, c14, c1, 0	@ CNTHCTL
+1:
+#endif
+
+	bic	r7, r4, #MODE_MASK
+	orr	r7, r7, #SVC_MODE
+THUMB(	orr	r7, r7, #PSR_T_BIT	)
+	msr	spsr_cxsf, r7		@ This is SPSR_hyp.
+
+	__MSR_ELR_HYP(14)		@ msr elr_hyp, lr
+	__ERET				@ return, switching to SVC mode
+					@ The boot CPU mode is left in r4.
+ENDPROC(__hyp_stub_install_secondary)
+
+__hyp_stub_do_trap:
+	cmp	r0, #-1
+	mrceq	p15, 4, r0, c12, c0, 0	@ get HVBAR
+	mcrne	p15, 4, r0, c12, c0, 0	@ set HVBAR
+	__ERET
+ENDPROC(__hyp_stub_do_trap)
+
+/*
+ * __hyp_set_vectors: Call this after boot to set the initial hypervisor
+ * vectors as part of hypervisor installation.  On an SMP system, this should
+ * be called on each CPU.
+ *
+ * r0 must be the physical address of the new vector table (which must lie
+ * in the bottom 4GB of physical address space).
+ *
+ * r0 must be 32-byte aligned.
+ *
+ * Before calling this, you must check that the stub hypervisor is installed
+ * everywhere, by waiting for any secondary CPUs to be brought up and then
+ * checking that BOOT_CPU_MODE_HAVE_HYP(__boot_cpu_mode) is true.
+ *
+ * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
+ * something else went wrong... in such cases, trying to install a new
+ * hypervisor is unlikely to work as desired.
+ *
+ * When you call into your shiny new hypervisor, sp_hyp will contain junk,
+ * so you will need to set that to something sensible at the new hypervisor's
+ * initialisation entry point.
+ */
+ENTRY(__hyp_get_vectors)
+	mov	r0, #-1
+ENDPROC(__hyp_get_vectors)
+	@ fall through
+ENTRY(__hyp_set_vectors)
+	__HVC(0)
+	bx	lr
+ENDPROC(__hyp_set_vectors)
+
+#ifndef ZIMAGE
+.align 2
+.L__boot_cpu_mode_offset:
+	.long	__boot_cpu_mode - .
+#endif
+
+.align 5
+__hyp_stub_vectors:
+__hyp_stub_reset:	W(b)	.
+__hyp_stub_und:		W(b)	.
+__hyp_stub_svc:		W(b)	.
+__hyp_stub_pabort:	W(b)	.
+__hyp_stub_dabort:	W(b)	.
+__hyp_stub_trap:	W(b)	__hyp_stub_do_trap
+__hyp_stub_irq:		W(b)	.
+__hyp_stub_fiq:		W(b)	.
+ENDPROC(__hyp_stub_vectors)
+
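A client of this stub would follow the contract in the comment above __hyp_get_vectors: confirm every CPU still owns the stub, then repoint HVBAR at its own table and trap in. A hypothetical sketch against the asm/virt.h API; install_my_hypervisor and my_vectors_phys are illustrative names, not part of this series:

#include <asm/virt.h>

/* Sketch only: install a new hypervisor through the stub's HVC trap.
 * Assumes all secondaries are up and is_hyp_mode_available() holds. */
static int install_my_hypervisor(unsigned long my_vectors_phys)
{
	if (!is_hyp_mode_available())
		return -1;	/* no stub, or CPUs booted in mixed modes */

	/* The next HVC from SVC mode will vector through our table;
	 * remember that sp_hyp is junk until our init code sets it. */
	__hyp_set_vectors(my_vectors_phys);
	return 0;
}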
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index a81dcecc7343..04fd01feea86 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -55,6 +55,7 @@
 #include <asm/traps.h>
 #include <asm/unwind.h>
 #include <asm/memblock.h>
+#include <asm/virt.h>
 
 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
 #include "compat.h"
@@ -937,6 +938,21 @@ static int __init meminfo_cmp(const void *_a, const void *_b)
 	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
 }
 
+void __init hyp_mode_check(void)
+{
+#ifdef CONFIG_ARM_VIRT_EXT
+	if (is_hyp_mode_available()) {
+		pr_info("CPU: All CPU(s) started in HYP mode.\n");
+		pr_info("CPU: Virtualization extensions available.\n");
+	} else if (is_hyp_mode_mismatched()) {
+		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
+			__boot_cpu_mode & MODE_MASK);
+		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
+	} else
+		pr_info("CPU: All CPU(s) started in SVC mode.\n");
+#endif
+}
+
 void __init setup_arch(char **cmdline_p)
 {
 	struct machine_desc *mdesc;
@@ -980,6 +996,10 @@ void __init setup_arch(char **cmdline_p)
 	if (is_smp())
 		smp_init_cpus();
 #endif
+
+	if (!is_smp())
+		hyp_mode_check();
+
 	reserve_crashkernel();
 
 	tcm_init();
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 199558b9462e..f34514a0834e 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -42,6 +42,7 @@
 #include <asm/ptrace.h>
 #include <asm/localtimer.h>
 #include <asm/smp_plat.h>
+#include <asm/virt.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -290,6 +291,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
 		num_online_cpus(),
 		bogosum / (500000/HZ),
 		(bogosum / (5000/HZ)) % 100);
+
+	hyp_mode_check();
 }
 
 void __init smp_prepare_boot_cpu(void)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 101b9681c08c..c9a4963b5c3d 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -624,6 +624,23 @@ config ARM_THUMBEE
 	  Say Y here if you have a CPU with the ThumbEE extension and code to
 	  make use of it.  Say N for code that can run on CPUs without ThumbEE.
 
+config ARM_VIRT_EXT
+	bool "Native support for the ARM Virtualization Extensions"
+	depends on MMU && CPU_V7
+	help
+	  Enable the kernel to make use of the ARM Virtualization
+	  Extensions to install hypervisors without run-time firmware
+	  assistance.
+
+	  A compliant bootloader is required in order to make maximum
+	  use of this feature.  Refer to Documentation/arm/Booting for
+	  details.
+
+	  It is safe to enable this option even if the kernel may not be
+	  booted in HYP mode, may not have support for the
+	  virtualization extensions, or may be booted with a
+	  non-compliant bootloader.
+
 config SWP_EMULATE
 	bool "Emulate SWP/SWPB instructions"
 	depends on !CPU_USE_DOMAINS && CPU_V7