about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorRussell King <rmk+kernel@armlinux.org.uk>2017-04-03 14:37:46 -0400
committerChristoffer Dall <cdall@linaro.org>2017-04-09 10:49:24 -0400
commit9da5ac236de6ab2189c999eb9ddddeef1431ab68 (patch)
tree586a9df656c6c6e1d5c2c341cc4204c5cb9f3d5c
parent1342337bc80a5bfb9aa83574da9fb2e22cc64121 (diff)
ARM: soft-reboot into same mode that we entered the kernel
When we soft-reboot (eg, kexec) from one kernel into the next, we need to ensure that we enter the new kernel in the same processor mode as when we were entered, so that (eg) the new kernel can install its own hypervisor - the old kernel's hypervisor will have been overwritten.

In order to do this, we need to pass a flag to cpu_reset() so it knows what to do, and we need to modify the kernel's own hypervisor stub to allow it to handle a soft-reboot.

As we are always guaranteed to install our own hypervisor if we're entered in HYP32 mode, and KVM will have moved itself out of the way on kexec/normal reboot, we can assume that our hypervisor is in place when we want to kexec, so changing our hypervisor API should not be a problem.

Tested-by: Keerthy <j-keerthy@ti.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <cdall@linaro.org>
-rw-r--r--arch/arm/include/asm/proc-fns.h4
-rw-r--r--arch/arm/kernel/hyp-stub.S13
-rw-r--r--arch/arm/kernel/reboot.c7
-rw-r--r--arch/arm/mm/proc-v7.S12
4 files changed, 28 insertions, 8 deletions
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 8877ad5ffe10..f2e1af45bd6f 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -43,7 +43,7 @@ extern struct processor {
43 /* 43 /*
44 * Special stuff for a reset 44 * Special stuff for a reset
45 */ 45 */
46 void (*reset)(unsigned long addr) __attribute__((noreturn)); 46 void (*reset)(unsigned long addr, bool hvc) __attribute__((noreturn));
47 /* 47 /*
48 * Idle the processor 48 * Idle the processor
49 */ 49 */
@@ -88,7 +88,7 @@ extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
88#else 88#else
89extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 89extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
90#endif 90#endif
91extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); 91extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
92 92
93/* These three are private to arch/arm/kernel/suspend.c */ 93/* These three are private to arch/arm/kernel/suspend.c */
94extern void cpu_do_suspend(void *); 94extern void cpu_do_suspend(void *);
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index f3e9ba5fb642..82915231c6f8 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -24,6 +24,7 @@
24 24
25#define HVC_GET_VECTORS 0 25#define HVC_GET_VECTORS 0
26#define HVC_SET_VECTORS 1 26#define HVC_SET_VECTORS 1
27#define HVC_SOFT_RESTART 2
27 28
28#ifndef ZIMAGE 29#ifndef ZIMAGE
29/* 30/*
@@ -215,6 +216,10 @@ __hyp_stub_do_trap:
215 mcr p15, 4, r1, c12, c0, 0 @ set HVBAR 216 mcr p15, 4, r1, c12, c0, 0 @ set HVBAR
216 b __hyp_stub_exit 217 b __hyp_stub_exit
217 218
2191: teq r0, #HVC_SOFT_RESTART
220 bne 1f
221 bx r3
222
2181: mov r0, #-1 2231: mov r0, #-1
219 224
220__hyp_stub_exit: 225__hyp_stub_exit:
@@ -256,6 +261,14 @@ ENTRY(__hyp_set_vectors)
256 ret lr 261 ret lr
257ENDPROC(__hyp_set_vectors) 262ENDPROC(__hyp_set_vectors)
258 263
264ENTRY(__hyp_soft_restart)
265 mov r3, r0
266 mov r0, #HVC_SOFT_RESTART
267 __HVC(0)
268 mov r0, r3
269 ret lr
270ENDPROC(__hyp_soft_restart)
271
259#ifndef ZIMAGE 272#ifndef ZIMAGE
260.align 2 273.align 2
261.L__boot_cpu_mode_offset: 274.L__boot_cpu_mode_offset:
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
index 3fa867a2aae6..3b2aa9a9fe26 100644
--- a/arch/arm/kernel/reboot.c
+++ b/arch/arm/kernel/reboot.c
@@ -12,10 +12,11 @@
12 12
13#include <asm/cacheflush.h> 13#include <asm/cacheflush.h>
14#include <asm/idmap.h> 14#include <asm/idmap.h>
15#include <asm/virt.h>
15 16
16#include "reboot.h" 17#include "reboot.h"
17 18
18typedef void (*phys_reset_t)(unsigned long); 19typedef void (*phys_reset_t)(unsigned long, bool);
19 20
20/* 21/*
21 * Function pointers to optional machine specific functions 22 * Function pointers to optional machine specific functions
@@ -51,7 +52,9 @@ static void __soft_restart(void *addr)
51 52
52 /* Switch to the identity mapping. */ 53 /* Switch to the identity mapping. */
53 phys_reset = (phys_reset_t)virt_to_idmap(cpu_reset); 54 phys_reset = (phys_reset_t)virt_to_idmap(cpu_reset);
54 phys_reset((unsigned long)addr); 55
56 /* original stub should be restored by kvm */
57 phys_reset((unsigned long)addr, is_hyp_mode_available());
55 58
56 /* Should never get here. */ 59 /* Should never get here. */
57 BUG(); 60 BUG();
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index d00d52c9de3e..1846ca4255d0 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -53,11 +53,15 @@ ENDPROC(cpu_v7_proc_fin)
53 .align 5 53 .align 5
54 .pushsection .idmap.text, "ax" 54 .pushsection .idmap.text, "ax"
55ENTRY(cpu_v7_reset) 55ENTRY(cpu_v7_reset)
56 mrc p15, 0, r1, c1, c0, 0 @ ctrl register 56 mrc p15, 0, r2, c1, c0, 0 @ ctrl register
57 bic r1, r1, #0x1 @ ...............m 57 bic r2, r2, #0x1 @ ...............m
58 THUMB( bic r1, r1, #1 << 30 ) @ SCTLR.TE (Thumb exceptions) 58 THUMB( bic r2, r2, #1 << 30 ) @ SCTLR.TE (Thumb exceptions)
59 mcr p15, 0, r1, c1, c0, 0 @ disable MMU 59 mcr p15, 0, r2, c1, c0, 0 @ disable MMU
60 isb 60 isb
61#ifdef CONFIG_ARM_VIRT_EXT
62 teq r1, #0
63 bne __hyp_soft_restart
64#endif
61 bx r0 65 bx r0
62ENDPROC(cpu_v7_reset) 66ENDPROC(cpu_v7_reset)
63 .popsection 67 .popsection