author     Geoff Levand <geoff@infradead.org>          2016-06-23 13:54:48 -0400
committer  Catalin Marinas <catalin.marinas@arm.com>   2016-06-27 11:31:25 -0400
commit     d28f6df1305a86715e4e7ea0f043ba01c0a0e8d9 (patch)
tree       0d1f3fba97565a04c56ecf83ea24a0021d056e91
parent     f9076ecfb1216a478312b1c078d04792df6d4477 (diff)
arm64/kexec: Add core kexec support
Add three new files, kexec.h, machine_kexec.c and relocate_kernel.S, to the
arm64 architecture that add support for the kexec re-boot mechanism
(CONFIG_KEXEC) on arm64 platforms.

Signed-off-by: Geoff Levand <geoff@infradead.org>
Reviewed-by: James Morse <james.morse@arm.com>
[catalin.marinas@arm.com: removed dead code following James Morse's comments]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-rw-r--r--   arch/arm64/Kconfig                    |  10
-rw-r--r--   arch/arm64/include/asm/kexec.h        |  48
-rw-r--r--   arch/arm64/kernel/Makefile            |   2
-rw-r--r--   arch/arm64/kernel/machine_kexec.c     | 170
-rw-r--r--   arch/arm64/kernel/relocate_kernel.S   | 130
-rw-r--r--   include/uapi/linux/kexec.h            |   1
6 files changed, 361 insertions, 0 deletions
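The Kconfig entry below describes kexec as a system call, and the uapi hunk at the bottom of the patch reserves KEXEC_ARCH_AARCH64 for it. As a rough illustration of that user-facing side (a sketch, not part of the patch), the following loads one flat segment with kexec_load(2). The file name "image.bin", the 0x80080000 destination address and the single-segment layout are made-up placeholders; a real loader such as kexec-tools does far more (Image header parsing, DTB and purgatory setup).

/*
 * Hedged sketch only: one flat segment loaded with kexec_load(2).  The
 * file name, destination address and segment layout are assumptions,
 * not values taken from this patch.  Requires CAP_SYS_BOOT.
 */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <linux/kexec.h>	/* struct kexec_segment, KEXEC_ARCH_AARCH64 */

int main(void)
{
	struct stat st;
	struct kexec_segment seg;
	unsigned long entry = 0x80080000UL;	/* assumed physical load/entry address */
	int fd = open("image.bin", O_RDONLY);	/* assumed pre-linearised image */
	void *buf;

	if (fd < 0 || fstat(fd, &st) != 0)
		return 1;
	buf = malloc(st.st_size);
	if (!buf || read(fd, buf, st.st_size) != st.st_size)
		return 1;

	seg.buf   = buf;			/* user copy of the image */
	seg.bufsz = st.st_size;
	seg.mem   = (void *)entry;		/* physical destination */
	seg.memsz = (st.st_size + 4095) & ~4095UL;	/* page-aligned size */

	/* The high bits of the flags word carry KEXEC_ARCH_AARCH64 (183 << 16). */
	if (syscall(SYS_kexec_load, entry, 1UL, &seg,
		    (unsigned long)KEXEC_ARCH_AARCH64))
		return 1;

	/* reboot(LINUX_REBOOT_CMD_KEXEC) would then end up in machine_kexec(). */
	return 0;
}

With kexec-tools the same sequence is a kexec -l invocation to load the image followed by kexec -e to execute it.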
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index eb0b0a05751e..1b196bf99320 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -665,6 +665,16 @@ config PARAVIRT_TIME_ACCOUNTING
 
 	  If in doubt, say N here.
 
+config KEXEC
+	depends on PM_SLEEP_SMP
+	select KEXEC_CORE
+	bool "kexec system call"
+	---help---
+	  kexec is a system call that implements the ability to shutdown your
+	  current kernel, and to start another kernel.  It is like a reboot
+	  but it is independent of the system firmware.  And like a reboot
+	  you can start any kernel with it, not just Linux.
+
 config XEN_DOM0
 	def_bool y
 	depends on XEN
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
new file mode 100644
index 000000000000..04744dc5fb61
--- /dev/null
+++ b/arch/arm64/include/asm/kexec.h
@@ -0,0 +1,48 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ * Copyright (C) Huawei Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARM64_KEXEC_H
+#define _ARM64_KEXEC_H
+
+/* Maximum physical address we can use pages from */
+
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can reach in physical address mode */
+
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can use for the control code buffer */
+
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+
+#define KEXEC_CONTROL_PAGE_SIZE 4096
+
+#define KEXEC_ARCH KEXEC_ARCH_AARCH64
+
+#ifndef __ASSEMBLY__
+
+/**
+ * crash_setup_regs() - save registers for the panic kernel
+ *
+ * @newregs: registers are saved here
+ * @oldregs: registers to be saved (may be %NULL)
+ */
+
+static inline void crash_setup_regs(struct pt_regs *newregs,
+				    struct pt_regs *oldregs)
+{
+	/* Empty routine needed to avoid build errors. */
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 2173149d8954..7700c0c23962 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -46,6 +46,8 @@ arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
 arm64-obj-$(CONFIG_PARAVIRT)		+= paravirt.o
 arm64-obj-$(CONFIG_RANDOMIZE_BASE)	+= kaslr.o
 arm64-obj-$(CONFIG_HIBERNATION)	+= hibernate.o hibernate-asm.o
+arm64-obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o	\
+					   cpu-reset.o
 
 obj-y					+= $(arm64-obj-y) vdso/
 obj-m					+= $(arm64-obj-m)
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
new file mode 100644
index 000000000000..c40e64607545
--- /dev/null
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -0,0 +1,170 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ * Copyright (C) Huawei Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kexec.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/mmu_context.h>
+
+#include "cpu-reset.h"
+
+/* Global variables for the arm64_relocate_new_kernel routine. */
+extern const unsigned char arm64_relocate_new_kernel[];
+extern const unsigned long arm64_relocate_new_kernel_size;
+
+static unsigned long kimage_start;
+
+void machine_kexec_cleanup(struct kimage *kimage)
+{
+	/* Empty routine needed to avoid build errors. */
+}
+
+/**
+ * machine_kexec_prepare - Prepare for a kexec reboot.
+ *
+ * Called from the core kexec code when a kernel image is loaded.
+ * Forbid loading a kexec kernel if we have no way of hotplugging cpus or cpus
+ * are stuck in the kernel. This avoids a panic once we hit machine_kexec().
+ */
+int machine_kexec_prepare(struct kimage *kimage)
+{
+	kimage_start = kimage->start;
+
+	if (kimage->type != KEXEC_TYPE_CRASH && cpus_are_stuck_in_kernel()) {
+		pr_err("Can't kexec: CPUs are stuck in the kernel.\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/**
+ * kexec_list_flush - Helper to flush the kimage list and source pages to PoC.
+ */
+static void kexec_list_flush(struct kimage *kimage)
+{
+	kimage_entry_t *entry;
+
+	for (entry = &kimage->head; ; entry++) {
+		unsigned int flag;
+		void *addr;
+
+		/* flush the list entries. */
+		__flush_dcache_area(entry, sizeof(kimage_entry_t));
+
+		flag = *entry & IND_FLAGS;
+		if (flag == IND_DONE)
+			break;
+
+		addr = phys_to_virt(*entry & PAGE_MASK);
+
+		switch (flag) {
+		case IND_INDIRECTION:
+			/* Set entry point just before the new list page. */
+			entry = (kimage_entry_t *)addr - 1;
+			break;
+		case IND_SOURCE:
+			/* flush the source pages. */
+			__flush_dcache_area(addr, PAGE_SIZE);
+			break;
+		case IND_DESTINATION:
+			break;
+		default:
+			BUG();
+		}
+	}
+}
+
+/**
+ * kexec_segment_flush - Helper to flush the kimage segments to PoC.
+ */
+static void kexec_segment_flush(const struct kimage *kimage)
+{
+	unsigned long i;
+
+	pr_debug("%s:\n", __func__);
+
+	for (i = 0; i < kimage->nr_segments; i++) {
+		pr_debug("  segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
+			i,
+			kimage->segment[i].mem,
+			kimage->segment[i].mem + kimage->segment[i].memsz,
+			kimage->segment[i].memsz,
+			kimage->segment[i].memsz / PAGE_SIZE);
+
+		__flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
+			kimage->segment[i].memsz);
+	}
+}
+
+/**
+ * machine_kexec - Do the kexec reboot.
+ *
+ * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
+ */
+void machine_kexec(struct kimage *kimage)
+{
+	phys_addr_t reboot_code_buffer_phys;
+	void *reboot_code_buffer;
+
+	/*
+	 * New cpus may have become stuck_in_kernel after we loaded the image.
+	 */
+	BUG_ON(cpus_are_stuck_in_kernel() || (num_online_cpus() > 1));
+
+	reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
+	reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
+
+	/*
+	 * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
+	 * after the kernel is shut down.
+	 */
+	memcpy(reboot_code_buffer, arm64_relocate_new_kernel,
+		arm64_relocate_new_kernel_size);
+
+	/* Flush the reboot_code_buffer in preparation for its execution. */
+	__flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
+	flush_icache_range((uintptr_t)reboot_code_buffer,
+		arm64_relocate_new_kernel_size);
+
+	/* Flush the kimage list and its buffers. */
+	kexec_list_flush(kimage);
+
+	/* Flush the new image if already in place. */
+	if (kimage->head & IND_DONE)
+		kexec_segment_flush(kimage);
+
+	pr_info("Bye!\n");
+
+	/* Disable all DAIF exceptions. */
+	asm volatile ("msr daifset, #0xf" : : : "memory");
+
+	/*
+	 * cpu_soft_restart will shutdown the MMU, disable data caches, then
+	 * transfer control to the reboot_code_buffer which contains a copy of
+	 * the arm64_relocate_new_kernel routine.  arm64_relocate_new_kernel
+	 * uses physical addressing to relocate the new image to its final
+	 * position and transfers control to the image entry point when the
+	 * relocation is complete.
+	 */
+
+	cpu_soft_restart(1, reboot_code_buffer_phys, kimage->head,
+		kimage_start, 0);
+
+	BUG(); /* Should never get here. */
+}
+
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+	/* Empty routine needed to avoid build errors. */
+}
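Both kexec_list_flush() above and the .Lloop in relocate_kernel.S below walk the same structure: a list of 64-bit entries, each a page-aligned physical address with IND_* flags in its low bits. IND_DESTINATION sets where the following pages land, IND_SOURCE names one page of the new image and advances that destination, IND_INDIRECTION chains to a fresh page of entries, and IND_DONE ends the list. A minimal sketch of that walk in plain C, assuming the generic IND_* values from include/linux/kexec.h, a hard-coded 4K page size, and host pointers standing in for physical addresses:

#include <stdint.h>

/* Generic kimage entry flags as in include/linux/kexec.h (assumed values). */
#define IND_DESTINATION	0x1UL
#define IND_INDIRECTION	0x2UL
#define IND_DONE	0x4UL
#define IND_SOURCE	0x8UL

#define EX_PAGE_SIZE	4096UL			/* assumed 4K pages */
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))

/* Walk a kimage entry list the way kexec_list_flush() and .Lloop do. */
static void walk_kimage_list(const uint64_t *entry,
			     void (*copy_one)(uint64_t dest, uint64_t src))
{
	uint64_t dest = 0;

	for (;;) {
		uint64_t e = *entry++;			/* entry = *ptr++ */
		uint64_t addr = e & EX_PAGE_MASK;	/* address part of the entry */

		if (e & IND_DONE)			/* end of the whole list */
			break;
		if (e & IND_INDIRECTION)		/* continue in a new entry page */
			entry = (const uint64_t *)(uintptr_t)addr;
		else if (e & IND_DESTINATION)		/* where following pages land */
			dest = addr;
		else if (e & IND_SOURCE) {		/* copy one page of the image */
			copy_one(dest, addr);
			dest += EX_PAGE_SIZE;
		}
	}
}

In the kernel the addresses are physical and go through phys_to_virt() before being touched, which is exactly what kexec_list_flush() does before flushing each entry page and source page to the PoC.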
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
new file mode 100644
index 000000000000..51b73cdde287
--- /dev/null
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -0,0 +1,130 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ * Copyright (C) Huawei Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kexec.h>
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/kexec.h>
+#include <asm/page.h>
+#include <asm/sysreg.h>
+
+/*
+ * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
+ *
+ * The memory that the old kernel occupies may be overwritten when copying the
+ * new image to its final location.  To assure that the
+ * arm64_relocate_new_kernel routine which does that copy is not overwritten,
+ * all code and data needed by arm64_relocate_new_kernel must be between the
+ * symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end.  The
+ * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec
+ * control_code_page, a special page which has been set up to be preserved
+ * during the copy operation.
+ */
+ENTRY(arm64_relocate_new_kernel)
+
+	/* Setup the list loop variables. */
+	mov	x17, x1				/* x17 = kimage_start */
+	mov	x16, x0				/* x16 = kimage_head */
+	dcache_line_size x15, x0		/* x15 = dcache line size */
+	mov	x14, xzr			/* x14 = entry ptr */
+	mov	x13, xzr			/* x13 = copy dest */
+
+	/* Clear the sctlr_el2 flags. */
+	mrs	x0, CurrentEL
+	cmp	x0, #CurrentEL_EL2
+	b.ne	1f
+	mrs	x0, sctlr_el2
+	ldr	x1, =SCTLR_ELx_FLAGS
+	bic	x0, x0, x1
+	msr	sctlr_el2, x0
+	isb
+1:
+
+	/* Check if the new image needs relocation. */
+	tbnz	x16, IND_DONE_BIT, .Ldone
+
+.Lloop:
+	and	x12, x16, PAGE_MASK		/* x12 = addr */
+
+	/* Test the entry flags. */
+.Ltest_source:
+	tbz	x16, IND_SOURCE_BIT, .Ltest_indirection
+
+	/* Invalidate dest page to PoC. */
+	mov	x0, x13
+	add	x20, x0, #PAGE_SIZE
+	sub	x1, x15, #1
+	bic	x0, x0, x1
+2:	dc	ivac, x0
+	add	x0, x0, x15
+	cmp	x0, x20
+	b.lo	2b
+	dsb	sy
+
+	mov	x20, x13
+	mov	x21, x12
+	copy_page x20, x21, x0, x1, x2, x3, x4, x5, x6, x7
+
+	/* dest += PAGE_SIZE */
+	add	x13, x13, PAGE_SIZE
+	b	.Lnext
+
+.Ltest_indirection:
+	tbz	x16, IND_INDIRECTION_BIT, .Ltest_destination
+
+	/* ptr = addr */
+	mov	x14, x12
+	b	.Lnext
+
+.Ltest_destination:
+	tbz	x16, IND_DESTINATION_BIT, .Lnext
+
+	/* dest = addr */
+	mov	x13, x12
+
+.Lnext:
+	/* entry = *ptr++ */
+	ldr	x16, [x14], #8
+
+	/* while (!(entry & DONE)) */
+	tbz	x16, IND_DONE_BIT, .Lloop
+
+.Ldone:
+	/* wait for writes from copy_page to finish */
+	dsb	nsh
+	ic	iallu
+	dsb	nsh
+	isb
+
+	/* Start new image. */
+	mov	x0, xzr
+	mov	x1, xzr
+	mov	x2, xzr
+	mov	x3, xzr
+	br	x17
+
+ENDPROC(arm64_relocate_new_kernel)
+
+.ltorg
+
+.align 3	/* To keep the 64-bit values below naturally aligned. */
+
+.Lcopy_end:
+.org	KEXEC_CONTROL_PAGE_SIZE
+
+/*
+ * arm64_relocate_new_kernel_size - Number of bytes to copy to the
+ * control_code_page.
+ */
+.globl arm64_relocate_new_kernel_size
+arm64_relocate_new_kernel_size:
+	.quad	.Lcopy_end - arm64_relocate_new_kernel
diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h
index 99048e501b88..aae5ebf2022b 100644
--- a/include/uapi/linux/kexec.h
+++ b/include/uapi/linux/kexec.h
@@ -39,6 +39,7 @@
 #define KEXEC_ARCH_SH      (42 << 16)
 #define KEXEC_ARCH_MIPS_LE (10 << 16)
 #define KEXEC_ARCH_MIPS    ( 8 << 16)
+#define KEXEC_ARCH_AARCH64 (183 << 16)
 
 /* The artificial cap on the number of segments passed to kexec_load. */
 #define KEXEC_SEGMENT_MAX 16