author    Michael Holzheu <holzheu@linux.vnet.ibm.com>	2011-10-30 10:16:40 -0400
committer Martin Schwidefsky <schwidefsky@de.ibm.com>	2011-10-30 10:16:42 -0400
commit    60a0c68df2632feaa4a986af084650d1165d89c5 (patch)
tree      f55907defeab43de02a5a3127c8d5a694a21b3a2
parent    7f0bf656c66e4292e965c95fd9de55c72b6578bb (diff)

[S390] kdump backend code

This patch provides the architecture specific part of the s390 kdump support.

Signed-off-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--  arch/s390/Kconfig                 |  10
-rw-r--r--  arch/s390/include/asm/ipl.h       |   1
-rw-r--r--  arch/s390/include/asm/kexec.h     |   3
-rw-r--r--  arch/s390/include/asm/reset.h     |   2
-rw-r--r--  arch/s390/include/asm/setup.h     |  13
-rw-r--r--  arch/s390/kernel/Makefile         |   1
-rw-r--r--  arch/s390/kernel/base.S           |   6
-rw-r--r--  arch/s390/kernel/crash_dump.c     | 427
-rw-r--r--  arch/s390/kernel/head.S           |  22
-rw-r--r--  arch/s390/kernel/head_kdump.S     | 119
-rw-r--r--  arch/s390/kernel/ipl.c            |  12
-rw-r--r--  arch/s390/kernel/machine_kexec.c  | 161
-rw-r--r--  arch/s390/kernel/mem_detect.c     |  69
-rw-r--r--  arch/s390/kernel/reipl.S          |   6
-rw-r--r--  arch/s390/kernel/reipl64.S        |   5
-rw-r--r--  arch/s390/kernel/setup.c          | 208
-rw-r--r--  arch/s390/kernel/smp.c            |  19
-rw-r--r--  arch/s390/mm/vmem.c               |   6
-rw-r--r--  drivers/s390/cio/cio.c            |   2
19 files changed, 1079 insertions(+), 13 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 6b99fc3f9b63..a9fbd43395f7 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -569,6 +569,16 @@ config KEXEC
569 current kernel, and to start another kernel. It is like a reboot
570 but is independent of hardware/microcode support.
571
572config CRASH_DUMP
573 bool "kernel crash dumps"
574 depends on 64BIT
575 help
576 Generate crash dump after being started by kexec.
577 Crash dump kernels are loaded in the main kernel with kexec-tools
578 into a specially reserved region and then later executed after
579 a crash by kdump/kexec.
580 For more details see Documentation/kdump/kdump.txt
581
582config ZFCPDUMP
583 def_bool n
584 prompt "zfcpdump support"
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 97cc4403fabf..6940abfbe1d9 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -168,5 +168,6 @@ enum diag308_rc {
168
169extern int diag308(unsigned long subcode, void *addr);
170extern void diag308_reset(void);
171extern void store_status(void);
172
173#endif /* _ASM_S390_IPL_H */
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index bb729b84a21e..fb1c96fa348c 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -30,6 +30,9 @@
30/* Not more than 2GB */
31#define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31)
32
33/* Maximum address we can use for the crash control pages */
34#define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL)
35
36/* Allocate one page for the pdp and the second for the code */
37#define KEXEC_CONTROL_PAGE_SIZE 4096
38
diff --git a/arch/s390/include/asm/reset.h b/arch/s390/include/asm/reset.h
index f584f4a52581..3d6ad4ad2a3f 100644
--- a/arch/s390/include/asm/reset.h
+++ b/arch/s390/include/asm/reset.h
@@ -17,5 +17,5 @@ struct reset_call {
17
18extern void register_reset_call(struct reset_call *reset);
19extern void unregister_reset_call(struct reset_call *reset);
- extern void s390_reset_system(void);
+ extern void s390_reset_system(void (*func)(void *), void *data);
21#endif /* _ASM_S390_RESET_H */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index d5e2ef10537d..5a099714df04 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -26,15 +26,21 @@
26#define IPL_DEVICE (*(unsigned long *) (0x10404))
27#define INITRD_START (*(unsigned long *) (0x1040C))
28#define INITRD_SIZE (*(unsigned long *) (0x10414))
29#define OLDMEM_BASE (*(unsigned long *) (0x1041C))
30#define OLDMEM_SIZE (*(unsigned long *) (0x10424))
31#else /* __s390x__ */
32#define IPL_DEVICE (*(unsigned long *) (0x10400))
33#define INITRD_START (*(unsigned long *) (0x10408))
34#define INITRD_SIZE (*(unsigned long *) (0x10410))
35#define OLDMEM_BASE (*(unsigned long *) (0x10418))
36#define OLDMEM_SIZE (*(unsigned long *) (0x10420))
37#endif /* __s390x__ */
38#define COMMAND_LINE ((char *) (0x10480))
39
40#define CHUNK_READ_WRITE 0
41#define CHUNK_READ_ONLY 1
42#define CHUNK_OLDMEM 4
43#define CHUNK_CRASHK 5
44
45struct mem_chunk {
46 unsigned long addr;
@@ -48,6 +54,8 @@ extern int memory_end_set;
54extern unsigned long memory_end;
55
56void detect_memory_layout(struct mem_chunk chunk[]);
57void create_mem_hole(struct mem_chunk memory_chunk[], unsigned long addr,
58 unsigned long size, int type);
59
60#define PRIMARY_SPACE_MODE 0
61#define ACCESS_REGISTER_MODE 1
@@ -106,6 +114,7 @@ extern unsigned int user_mode;
114#endif /* __s390x__ */
115
116#define ZFCPDUMP_HSA_SIZE (32UL<<20)
117#define ZFCPDUMP_HSA_SIZE_MAX (64UL<<20)
118
119/*
120 * Console mode. Override with conmode=
@@ -134,10 +143,14 @@ extern char kernel_nss_name[];
143#define IPL_DEVICE 0x10404
144#define INITRD_START 0x1040C
145#define INITRD_SIZE 0x10414
146#define OLDMEM_BASE 0x1041C
147#define OLDMEM_SIZE 0x10424
148#else /* __s390x__ */
149#define IPL_DEVICE 0x10400
150#define INITRD_START 0x10408
151#define INITRD_SIZE 0x10410
152#define OLDMEM_BASE 0x10418
153#define OLDMEM_SIZE 0x10420
154#endif /* __s390x__ */
155#define COMMAND_LINE 0x10480
156
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index df3732249baa..dd4f07640919 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
48obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
49obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
50obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
51obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
52
53# Kexec part
54S390_KEXEC_OBJS := machine_kexec.o crash.o
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 255435663bf8..f8828d38fa6e 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -86,6 +86,8 @@ s390_base_pgm_handler_fn:
86ENTRY(diag308_reset)
87 larl %r4,.Lctlregs # Save control registers
88 stctg %c0,%c15,0(%r4)
89 larl %r4,.Lfpctl # Floating point control register
90 stfpc 0(%r4)
91 larl %r4,.Lrestart_psw # Setup restart PSW at absolute 0
92 lghi %r3,0
93 lg %r4,0(%r4) # Save PSW
@@ -99,6 +101,8 @@ ENTRY(diag308_reset)
101 sam64 # Switch to 64 bit addressing mode
102 larl %r4,.Lctlregs # Restore control registers
103 lctlg %c0,%c15,0(%r4)
104 larl %r4,.Lfpctl # Restore floating point ctl register
105 lfpc 0(%r4)
106 br %r14
107.align 16
108.Lrestart_psw:
@@ -110,6 +114,8 @@ ENTRY(diag308_reset)
114 .rept 16
115 .quad 0
116 .endr
117.Lfpctl:
118 .long 0
119 .previous
120
121#else /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
new file mode 100644
index 000000000000..2a9a3f405574
--- /dev/null
+++ b/arch/s390/kernel/crash_dump.c
@@ -0,0 +1,427 @@
1/*
2 * S390 kdump implementation
3 *
4 * Copyright IBM Corp. 2011
5 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
6 */
7
8#include <linux/crash_dump.h>
9#include <asm/lowcore.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/gfp.h>
13#include <linux/slab.h>
14#include <linux/crash_dump.h>
15#include <linux/bootmem.h>
16#include <linux/elf.h>
17#include <asm/ipl.h>
18
19#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
20#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
21#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
22
23/*
24 * Copy one page from "oldmem"
25 *
26 * For the kdump reserved memory this functions performs a swap operation:
27 * - [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0 - OLDMEM_SIZE].
28 * - [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
29 */
30ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
31 size_t csize, unsigned long offset, int userbuf)
32{
33 unsigned long src;
34 int rc;
35
36 if (!csize)
37 return 0;
38
39 src = (pfn << PAGE_SHIFT) + offset;
40 if (src < OLDMEM_SIZE)
41 src += OLDMEM_BASE;
42 else if (src > OLDMEM_BASE &&
43 src < OLDMEM_BASE + OLDMEM_SIZE)
44 src -= OLDMEM_BASE;
45 if (userbuf)
46 rc = copy_to_user_real((void __user *) buf, (void *) src,
47 csize);
48 else
49 rc = memcpy_real(buf, (void *) src, csize);
50 return rc < 0 ? rc : csize;
51}
52
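The swap described in the comment above is easier to see with concrete numbers. Below is a minimal, purely illustrative sketch of the same address translation, assuming a hypothetical 256 MiB crashkernel region at 0x10000000 (names and values are not part of the patch):

/* Illustrative only: the address swap performed by copy_oldmem_page(). */
#define EX_OLDMEM_BASE	0x10000000UL	/* assumed crashkernel base */
#define EX_OLDMEM_SIZE	0x10000000UL	/* assumed crashkernel size */

static unsigned long ex_oldmem_translate(unsigned long src)
{
	if (src < EX_OLDMEM_SIZE)
		return src + EX_OLDMEM_BASE;	/* old low memory was swapped up here */
	if (src >= EX_OLDMEM_BASE && src < EX_OLDMEM_BASE + EX_OLDMEM_SIZE)
		return src - EX_OLDMEM_BASE;	/* former reserved region now sits at 0 */
	return src;				/* everything else is read in place */
}

/*
 * ex_oldmem_translate(0x1000)     == 0x10001000
 * ex_oldmem_translate(0x10002000) == 0x2000
 */
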
53/*
54 * Copy memory from old kernel
55 */
56static int copy_from_oldmem(void *dest, void *src, size_t count)
57{
58 unsigned long copied = 0;
59 int rc;
60
61 if ((unsigned long) src < OLDMEM_SIZE) {
62 copied = min(count, OLDMEM_SIZE - (unsigned long) src);
63 rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
64 if (rc)
65 return rc;
66 }
67 return memcpy_real(dest + copied, src + copied, count - copied);
68}
69
70/*
71 * Alloc memory and panic in case of ENOMEM
72 */
73static void *kzalloc_panic(int len)
74{
75 void *rc;
76
77 rc = kzalloc(len, GFP_KERNEL);
78 if (!rc)
79 panic("s390 kdump kzalloc (%d) failed", len);
80 return rc;
81}
82
83/*
84 * Get memory layout and create hole for oldmem
85 */
86static struct mem_chunk *get_memory_layout(void)
87{
88 struct mem_chunk *chunk_array;
89
90 chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk));
91 detect_memory_layout(chunk_array);
92 create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE, CHUNK_CRASHK);
93 return chunk_array;
94}
95
96/*
97 * Initialize ELF note
98 */
99static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len,
100 const char *name)
101{
102 Elf64_Nhdr *note;
103 u64 len;
104
105 note = (Elf64_Nhdr *)buf;
106 note->n_namesz = strlen(name) + 1;
107 note->n_descsz = d_len;
108 note->n_type = type;
109 len = sizeof(Elf64_Nhdr);
110
111 memcpy(buf + len, name, note->n_namesz);
112 len = roundup(len + note->n_namesz, 4);
113
114 memcpy(buf + len, desc, note->n_descsz);
115 len = roundup(len + note->n_descsz, 4);
116
117 return PTR_ADD(buf, len);
118}
119
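nt_init() emits the standard ELF note layout: a 12-byte Elf64_Nhdr, the NUL-terminated owner name rounded up to a 4-byte boundary, then the descriptor rounded up to a 4-byte boundary. A small sketch of that size calculation, mirroring the roundup() calls above (an illustrative helper, not part of the patch):

/* Bytes one note entry occupies in the buffer. */
static unsigned long ex_note_size(unsigned long namesz, unsigned long descsz)
{
	unsigned long len = 12;			/* sizeof(Elf64_Nhdr) */

	len = (len + namesz + 3) & ~3UL;	/* name incl. NUL, padded to 4 */
	len = (len + descsz + 3) & ~3UL;	/* descriptor, padded to 4 */
	return len;
}

/* Example: a "CORE" note -> 12 + 8 ("CORE\0" padded) + padded descriptor size. */
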
120/*
121 * Initialize prstatus note
122 */
123static void *nt_prstatus(void *ptr, struct save_area *sa)
124{
125 struct elf_prstatus nt_prstatus;
126 static int cpu_nr = 1;
127
128 memset(&nt_prstatus, 0, sizeof(nt_prstatus));
129 memcpy(&nt_prstatus.pr_reg.gprs, sa->gp_regs, sizeof(sa->gp_regs));
130 memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw));
131 memcpy(&nt_prstatus.pr_reg.acrs, sa->acc_regs, sizeof(sa->acc_regs));
132 nt_prstatus.pr_pid = cpu_nr;
133 cpu_nr++;
134
135 return nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus),
136 "CORE");
137}
138
139/*
140 * Initialize fpregset (floating point) note
141 */
142static void *nt_fpregset(void *ptr, struct save_area *sa)
143{
144 elf_fpregset_t nt_fpregset;
145
146 memset(&nt_fpregset, 0, sizeof(nt_fpregset));
147 memcpy(&nt_fpregset.fpc, &sa->fp_ctrl_reg, sizeof(sa->fp_ctrl_reg));
148 memcpy(&nt_fpregset.fprs, &sa->fp_regs, sizeof(sa->fp_regs));
149
150 return nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset),
151 "CORE");
152}
153
154/*
155 * Initialize timer note
156 */
157static void *nt_s390_timer(void *ptr, struct save_area *sa)
158{
159 return nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer),
160 KEXEC_CORE_NOTE_NAME);
161}
162
163/*
164 * Initialize TOD clock comparator note
165 */
166static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa)
167{
168 return nt_init(ptr, NT_S390_TODCMP, &sa->clk_cmp,
169 sizeof(sa->clk_cmp), KEXEC_CORE_NOTE_NAME);
170}
171
172/*
173 * Initialize TOD programmable register note
174 */
175static void *nt_s390_tod_preg(void *ptr, struct save_area *sa)
176{
177 return nt_init(ptr, NT_S390_TODPREG, &sa->tod_reg,
178 sizeof(sa->tod_reg), KEXEC_CORE_NOTE_NAME);
179}
180
181/*
182 * Initialize control register note
183 */
184static void *nt_s390_ctrs(void *ptr, struct save_area *sa)
185{
186 return nt_init(ptr, NT_S390_CTRS, &sa->ctrl_regs,
187 sizeof(sa->ctrl_regs), KEXEC_CORE_NOTE_NAME);
188}
189
190/*
191 * Initialize prefix register note
192 */
193static void *nt_s390_prefix(void *ptr, struct save_area *sa)
194{
195 return nt_init(ptr, NT_S390_PREFIX, &sa->pref_reg,
196 sizeof(sa->pref_reg), KEXEC_CORE_NOTE_NAME);
197}
198
199/*
200 * Fill ELF notes for one CPU with save area registers
201 */
202void *fill_cpu_elf_notes(void *ptr, struct save_area *sa)
203{
204 ptr = nt_prstatus(ptr, sa);
205 ptr = nt_fpregset(ptr, sa);
206 ptr = nt_s390_timer(ptr, sa);
207 ptr = nt_s390_tod_cmp(ptr, sa);
208 ptr = nt_s390_tod_preg(ptr, sa);
209 ptr = nt_s390_ctrs(ptr, sa);
210 ptr = nt_s390_prefix(ptr, sa);
211 return ptr;
212}
213
214/*
215 * Initialize prpsinfo note (new kernel)
216 */
217static void *nt_prpsinfo(void *ptr)
218{
219 struct elf_prpsinfo prpsinfo;
220
221 memset(&prpsinfo, 0, sizeof(prpsinfo));
222 prpsinfo.pr_sname = 'R';
223 strcpy(prpsinfo.pr_fname, "vmlinux");
224 return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo),
225 KEXEC_CORE_NOTE_NAME);
226}
227
228/*
229 * Initialize vmcoreinfo note (new kernel)
230 */
231static void *nt_vmcoreinfo(void *ptr)
232{
233 char nt_name[11], *vmcoreinfo;
234 Elf64_Nhdr note;
235 void *addr;
236
237 if (copy_from_oldmem(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
238 return ptr;
239 memset(nt_name, 0, sizeof(nt_name));
240 if (copy_from_oldmem(&note, addr, sizeof(note)))
241 return ptr;
242 if (copy_from_oldmem(nt_name, addr + sizeof(note), sizeof(nt_name) - 1))
243 return ptr;
244 if (strcmp(nt_name, "VMCOREINFO") != 0)
245 return ptr;
246 vmcoreinfo = kzalloc_panic(note.n_descsz + 1);
247 if (copy_from_oldmem(vmcoreinfo, addr + 24, note.n_descsz))
248 return ptr;
249 vmcoreinfo[note.n_descsz] = 0;
250 return nt_init(ptr, 0, vmcoreinfo, note.n_descsz, "VMCOREINFO");
251}
252
253/*
254 * Initialize ELF header (new kernel)
255 */
256static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
257{
258 memset(ehdr, 0, sizeof(*ehdr));
259 memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
260 ehdr->e_ident[EI_CLASS] = ELFCLASS64;
261 ehdr->e_ident[EI_DATA] = ELFDATA2MSB;
262 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
263 memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
264 ehdr->e_type = ET_CORE;
265 ehdr->e_machine = EM_S390;
266 ehdr->e_version = EV_CURRENT;
267 ehdr->e_phoff = sizeof(Elf64_Ehdr);
268 ehdr->e_ehsize = sizeof(Elf64_Ehdr);
269 ehdr->e_phentsize = sizeof(Elf64_Phdr);
270 ehdr->e_phnum = mem_chunk_cnt + 1;
271 return ehdr + 1;
272}
273
274/*
275 * Return CPU count for ELF header (new kernel)
276 */
277static int get_cpu_cnt(void)
278{
279 int i, cpus = 0;
280
281 for (i = 0; zfcpdump_save_areas[i]; i++) {
282 if (zfcpdump_save_areas[i]->pref_reg == 0)
283 continue;
284 cpus++;
285 }
286 return cpus;
287}
288
289/*
290 * Return memory chunk count for ELF header (new kernel)
291 */
292static int get_mem_chunk_cnt(void)
293{
294 struct mem_chunk *chunk_array, *mem_chunk;
295 int i, cnt = 0;
296
297 chunk_array = get_memory_layout();
298 for (i = 0; i < MEMORY_CHUNKS; i++) {
299 mem_chunk = &chunk_array[i];
300 if (chunk_array[i].type != CHUNK_READ_WRITE &&
301 chunk_array[i].type != CHUNK_READ_ONLY)
302 continue;
303 if (mem_chunk->size == 0)
304 continue;
305 cnt++;
306 }
307 kfree(chunk_array);
308 return cnt;
309}
310
311/*
312 * Relocate pointer in order to allow vmcore code access the data
313 */
314static inline unsigned long relocate(unsigned long addr)
315{
316 return OLDMEM_BASE + addr;
317}
318
319/*
320 * Initialize ELF loads (new kernel)
321 */
322static int loads_init(Elf64_Phdr *phdr, u64 loads_offset)
323{
324 struct mem_chunk *chunk_array, *mem_chunk;
325 int i;
326
327 chunk_array = get_memory_layout();
328 for (i = 0; i < MEMORY_CHUNKS; i++) {
329 mem_chunk = &chunk_array[i];
330 if (mem_chunk->size == 0)
331 break;
332 if (chunk_array[i].type != CHUNK_READ_WRITE &&
333 chunk_array[i].type != CHUNK_READ_ONLY)
334 continue;
335 else
336 phdr->p_filesz = mem_chunk->size;
337 phdr->p_type = PT_LOAD;
338 phdr->p_offset = mem_chunk->addr;
339 phdr->p_vaddr = mem_chunk->addr;
340 phdr->p_paddr = mem_chunk->addr;
341 phdr->p_memsz = mem_chunk->size;
342 phdr->p_flags = PF_R | PF_W | PF_X;
343 phdr->p_align = PAGE_SIZE;
344 phdr++;
345 }
346 kfree(chunk_array);
347 return i;
348}
349
350/*
351 * Initialize notes (new kernel)
352 */
353static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
354{
355 struct save_area *sa;
356 void *ptr_start = ptr;
357 int i;
358
359 ptr = nt_prpsinfo(ptr);
360
361 for (i = 0; zfcpdump_save_areas[i]; i++) {
362 sa = zfcpdump_save_areas[i];
363 if (sa->pref_reg == 0)
364 continue;
365 ptr = fill_cpu_elf_notes(ptr, sa);
366 }
367 ptr = nt_vmcoreinfo(ptr);
368 memset(phdr, 0, sizeof(*phdr));
369 phdr->p_type = PT_NOTE;
370 phdr->p_offset = relocate(notes_offset);
371 phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start);
372 phdr->p_memsz = phdr->p_filesz;
373 return ptr;
374}
375
376/*
377 * Create ELF core header (new kernel)
378 */
379static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz)
380{
381 Elf64_Phdr *phdr_notes, *phdr_loads;
382 int mem_chunk_cnt;
383 void *ptr, *hdr;
384 u32 alloc_size;
385 u64 hdr_off;
386
387 mem_chunk_cnt = get_mem_chunk_cnt();
388
389 alloc_size = 0x1000 + get_cpu_cnt() * 0x300 +
390 mem_chunk_cnt * sizeof(Elf64_Phdr);
391 hdr = kzalloc_panic(alloc_size);
392 /* Init elf header */
393 ptr = ehdr_init(hdr, mem_chunk_cnt);
394 /* Init program headers */
395 phdr_notes = ptr;
396 ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
397 phdr_loads = ptr;
398 ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
399 /* Init notes */
400 hdr_off = PTR_DIFF(ptr, hdr);
401 ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
402 /* Init loads */
403 hdr_off = PTR_DIFF(ptr, hdr);
404 loads_init(phdr_loads, ((unsigned long) hdr) + hdr_off);
405 *elfcorebuf_sz = hdr_off;
406 *elfcorebuf = (void *) relocate((unsigned long) hdr);
407 BUG_ON(*elfcorebuf_sz > alloc_size);
408}
409
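For orientation, the buffer assembled by s390_elf_corehdr_create() ends up with the following rough layout (offsets follow from the ELF64 structure sizes; the note space is only estimated by the 0x1000 + cpus * 0x300 allocation above):

/*
 *   +0x00   Elf64_Ehdr                     (64 bytes)
 *   +0x40   Elf64_Phdr PT_NOTE             (56 bytes)
 *   +0x78   Elf64_Phdr PT_LOAD[0..n-1]     (56 bytes per memory chunk)
 *   ...     ELF notes: prpsinfo, per-CPU register sets, vmcoreinfo
 *   end     hdr_off -> reported as elfcorebuf_sz
 *
 * The PT_LOAD entries carry p_offset pointing at the memory chunks
 * themselves, so no load data is duplicated into this buffer.
 */
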
410/*
411 * Create kdump ELF core header in new kernel, if it has not been passed via
412 * the "elfcorehdr" kernel parameter
413 */
414static int setup_kdump_elfcorehdr(void)
415{
416 size_t elfcorebuf_sz;
417 char *elfcorebuf;
418
419 if (!OLDMEM_BASE || is_kdump_kernel())
420 return -EINVAL;
421 s390_elf_corehdr_create(&elfcorebuf, &elfcorebuf_sz);
422 elfcorehdr_addr = (unsigned long long) elfcorebuf;
423 elfcorehdr_size = elfcorebuf_sz;
424 return 0;
425}
426
427subsys_initcall(setup_kdump_elfcorehdr);
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 2d781bab37bb..900068d2bf92 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -449,10 +449,28 @@ ENTRY(start)
449#
450 .org 0x10000
451ENTRY(startup)
452 j .Lep_startup_normal
453 .org 0x10008
454#
455# This is a list of s390 kernel entry points. At address 0x1000f the number of
456# valid entry points is stored.
457#
458# IMPORTANT: Do not change this table, it is s390 kernel ABI!
459#
460 .ascii "S390EP"
461 .byte 0x00,0x01
462#
463# kdump startup-code at 0x10010, running in 64 bit absolute addressing mode
464#
465 .org 0x10010
466ENTRY(startup_kdump)
467 j .Lep_startup_kdump
468.Lep_startup_normal:
469 basr %r13,0 # get base
470.LPG0:
471 xc 0x200(256),0x200 # partially clear lowcore
472 xc 0x300(256),0x300
473 xc 0xe00(256),0xe00
474 stck __LC_LAST_UPDATE_CLOCK
475 spt 5f-.LPG0(%r13)
476 mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13)
@@ -534,6 +552,8 @@ ENTRY(startup)
552 .align 8
5535: .long 0x7fffffff,0xffffffff
554
555#include "head_kdump.S"
556
557#
558# params at 10400 (setup.h)
559#
@@ -541,6 +561,8 @@ ENTRY(startup)
561 .long 0,0 # IPL_DEVICE
562 .long 0,0 # INITRD_START
563 .long 0,0 # INITRD_SIZE
564 .long 0,0 # OLDMEM_BASE
565 .long 0,0 # OLDMEM_SIZE
566
567 .org COMMAND_LINE
568 .byte "root=/dev/ram0 ro"
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S
new file mode 100644
index 000000000000..e1ac3893e972
--- /dev/null
+++ b/arch/s390/kernel/head_kdump.S
@@ -0,0 +1,119 @@
1/*
2 * S390 kdump lowlevel functions (new kernel)
3 *
4 * Copyright IBM Corp. 2011
5 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
6 */
7
8#define DATAMOVER_ADDR 0x4000
9#define COPY_PAGE_ADDR 0x6000
10
11#ifdef CONFIG_CRASH_DUMP
12
13#
14# kdump entry (new kernel - not yet relocated)
15#
16# Note: This code has to be position independent
17#
18
19.align 2
20.Lep_startup_kdump:
21 lhi %r1,2 # mode 2 = esame (dump)
22 sigp %r1,%r0,0x12 # Switch to esame mode
23 sam64 # Switch to 64 bit addressing
24 basr %r13,0
25.Lbase:
26 larl %r2,.Lbase_addr # Check, if we have been
27 lg %r2,0(%r2) # already relocated:
28 clgr %r2,%r13 #
29 jne .Lrelocate # No : Start data mover
30 lghi %r2,0 # Yes: Start kdump kernel
31 brasl %r14,startup_kdump_relocated
32
33.Lrelocate:
34 larl %r4,startup
35 lg %r2,0x418(%r4) # Get kdump base
36 lg %r3,0x420(%r4) # Get kdump size
37
38 larl %r10,.Lcopy_start # Source of data mover
39 lghi %r8,DATAMOVER_ADDR # Target of data mover
40 mvc 0(256,%r8),0(%r10) # Copy data mover code
41
42 agr %r8,%r2 # Copy data mover to
43 mvc 0(256,%r8),0(%r10) # reserved mem
44
45 lghi %r14,DATAMOVER_ADDR # Jump to copied data mover
46 basr %r14,%r14
47.Lbase_addr:
48 .quad .Lbase
49
50#
51# kdump data mover code (runs at address DATAMOVER_ADDR)
52#
53# r2: kdump base address
54# r3: kdump size
55#
56.Lcopy_start:
57 basr %r13,0 # Base
580:
59 lgr %r11,%r2 # Save kdump base address
60 lgr %r12,%r2
61 agr %r12,%r3 # Compute kdump end address
62
63 lghi %r5,0
64 lghi %r10,COPY_PAGE_ADDR # Load copy page address
651:
66 mvc 0(256,%r10),0(%r5) # Copy old kernel to tmp
67 mvc 0(256,%r5),0(%r11) # Copy new kernel to old
68 mvc 0(256,%r11),0(%r10) # Copy tmp to new
69 aghi %r11,256
70 aghi %r5,256
71 clgr %r11,%r12
72 jl 1b
73
74 lg %r14,.Lstartup_kdump-0b(%r13)
75 basr %r14,%r14 # Start relocated kernel
76.Lstartup_kdump:
77 .long 0x00000000,0x00000000 + startup_kdump_relocated
78.Lcopy_end:
79
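The 256-byte copy loop above swaps [0, kdump size) with [kdump base, kdump base + size) so that the preloaded kdump image ends up at address 0 while the crashed kernel's low memory is preserved in the reserved region. A purely illustrative C rendering of that loop (the real code must run from the copied-out data mover at DATAMOVER_ADDR, outside both regions):

#include <string.h>

static void ex_kdump_swap(unsigned long base, unsigned long size)
{
	unsigned char tmp[256];		/* stands in for the page at COPY_PAGE_ADDR */
	unsigned long off;

	for (off = 0; off < size; off += 256) {
		memcpy(tmp, (void *) off, 256);			  /* old kernel -> tmp */
		memcpy((void *) off, (void *) (base + off), 256); /* kdump image -> low memory */
		memcpy((void *) (base + off), tmp, 256);	  /* tmp -> reserved region */
	}
}
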
80#
81# Startup of kdump (relocated new kernel)
82#
83.align 2
84startup_kdump_relocated:
85 basr %r13,0
860:
87 mvc 0(8,%r0),.Lrestart_psw-0b(%r13) # Setup restart PSW
88 mvc 464(16,%r0),.Lpgm_psw-0b(%r13) # Setup pgm check PSW
89 lhi %r1,1 # Start new kernel
90 diag %r1,%r1,0x308 # with diag 308
91
92.Lno_diag308: # No diag 308
93 sam31 # Switch to 31 bit addr mode
94 sr %r1,%r1 # Erase register r1
95 sr %r2,%r2 # Erase register r2
96 sigp %r1,%r2,0x12 # Switch to 31 bit arch mode
97 lpsw 0 # Start new kernel...
98.align 8
99.Lrestart_psw:
100 .long 0x00080000,0x80000000 + startup
101.Lpgm_psw:
102 .quad 0x0000000180000000,0x0000000000000000 + .Lno_diag308
103#else
104.align 2
105.Lep_startup_kdump:
106#ifdef CONFIG_64BIT
107 larl %r13,startup_kdump_crash
108 lpswe 0(%r13)
109.align 8
110startup_kdump_crash:
111 .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash
112#else
113 basr %r13,0
1140: lpsw startup_kdump_crash-0b(%r13)
115.align 8
116startup_kdump_crash:
117 .long 0x000a0000,0x00000000 + startup_kdump_crash
118#endif /* CONFIG_64BIT */
119#endif /* CONFIG_CRASH_DUMP */
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 90769b4bc7f6..ca0520c52547 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -16,6 +16,7 @@
16#include <linux/ctype.h>
17#include <linux/fs.h>
18#include <linux/gfp.h>
19#include <linux/crash_dump.h>
20#include <asm/ipl.h>
21#include <asm/smp.h>
22#include <asm/setup.h>
@@ -1740,6 +1741,9 @@ void do_restart(void)
1741{
1742 smp_restart_with_online_cpu();
1743 smp_send_stop();
1744#ifdef CONFIG_CRASH_DUMP
1745 crash_kexec(NULL);
1746#endif
1747 on_restart_trigger.action->fn(&on_restart_trigger);
1748 stop_run(&on_restart_trigger);
1749}
@@ -2010,7 +2014,7 @@ static void do_reset_calls(void)
2014
2015u32 dump_prefix_page;
2016
- void s390_reset_system(void)
+ void s390_reset_system(void (*func)(void *), void *data)
2018{
2019 struct _lowcore *lc;
2020
@@ -2038,6 +2042,10 @@ void s390_reset_system(void)
2042 S390_lowcore.program_new_psw.addr =
2043 PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
2044
2045 /* Store status at absolute zero */
2046 store_status();
2047
2048 do_reset_calls();
2049 if (func)
2050 func(data);
2051}
2043
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index b09b9c62573e..0ceac06a0299 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -1,10 +1,11 @@
1/*
2 * arch/s390/kernel/machine_kexec.c
3 *
- * Copyright IBM Corp. 2005,2006
+ * Copyright IBM Corp. 2005,2011
5 *
6 * Author(s): Rolf Adelsberger,
7 * Heiko Carstens <heiko.carstens@de.ibm.com>
8 * Michael Holzheu <holzheu@linux.vnet.ibm.com>
9 */
10
11#include <linux/device.h>
@@ -21,12 +22,131 @@
22#include <asm/smp.h>
23#include <asm/reset.h>
24#include <asm/ipl.h>
25#include <asm/diag.h>
26#include <asm/asm-offsets.h>
27
28typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
29
30extern const unsigned char relocate_kernel[];
31extern const unsigned long long relocate_kernel_len;
32
33#ifdef CONFIG_CRASH_DUMP
34
35void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
36
37/*
38 * Create ELF notes for one CPU
39 */
40static void add_elf_notes(int cpu)
41{
42 struct save_area *sa = (void *) 4608 + store_prefix();
43 void *ptr;
44
45 memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa));
46 ptr = (u64 *) per_cpu_ptr(crash_notes, cpu);
47 ptr = fill_cpu_elf_notes(ptr, sa);
48 memset(ptr, 0, sizeof(struct elf_note));
49}
50
51/*
52 * Store status of next available physical CPU
53 */
54static int store_status_next(int start_cpu, int this_cpu)
55{
56 struct save_area *sa = (void *) 4608 + store_prefix();
57 int cpu, rc;
58
59 for (cpu = start_cpu; cpu < 65536; cpu++) {
60 if (cpu == this_cpu)
61 continue;
62 do {
63 rc = raw_sigp(cpu, sigp_stop_and_store_status);
64 } while (rc == sigp_busy);
65 if (rc != sigp_order_code_accepted)
66 continue;
67 if (sa->pref_reg)
68 return cpu;
69 }
70 return -1;
71}
72
73/*
74 * Initialize CPU ELF notes
75 */
76void setup_regs(void)
77{
78 unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
79 int cpu, this_cpu, phys_cpu = 0, first = 1;
80
81 this_cpu = stap();
82
83 if (!S390_lowcore.prefixreg_save_area)
84 first = 0;
85 for_each_online_cpu(cpu) {
86 if (first) {
87 add_elf_notes(cpu);
88 first = 0;
89 continue;
90 }
91 phys_cpu = store_status_next(phys_cpu, this_cpu);
92 if (phys_cpu == -1)
93 break;
94 add_elf_notes(cpu);
95 phys_cpu++;
96 }
97 /* Copy dump CPU store status info to absolute zero */
98 memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
99}
100
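The constant 4608 used above is 0x1200, the SAVE_AREA_BASE offset within a CPU's prefix area where the store-status operation deposits the register contents; add_elf_notes() and setup_regs() therefore read each save area relative to the stored prefix. A tiny illustrative helper spelling out that arithmetic (not part of the patch):

/* Where the store-status data of a CPU with the given prefix ends up. */
static struct save_area *ex_save_area(unsigned long prefix)
{
	return (struct save_area *) (prefix + 4608UL);	/* 4608 == 0x1200 == SAVE_AREA_BASE */
}
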
101#endif
102
103/*
104 * Start kdump: We expect here that a store status has been done on our CPU
105 */
106static void __do_machine_kdump(void *image)
107{
108#ifdef CONFIG_CRASH_DUMP
109 int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;
110
111 __load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY);
112 setup_regs();
113 start_kdump(1);
114#endif
115}
116
117/*
118 * Check if kdump checksums are valid: We call purgatory with parameter "0"
119 */
120static int kdump_csum_valid(struct kimage *image)
121{
122#ifdef CONFIG_CRASH_DUMP
123 int (*start_kdump)(int) = (void *)image->start;
124 int rc;
125
126 __arch_local_irq_stnsm(0xfb); /* disable DAT */
127 rc = start_kdump(0);
128 __arch_local_irq_stosm(0x04); /* enable DAT */
129 return rc ? 0 : -EINVAL;
130#else
131 return -EINVAL;
132#endif
133}
134
135/*
136 * Give back memory to hypervisor before new kdump is loaded
137 */
138static int machine_kexec_prepare_kdump(void)
139{
140#ifdef CONFIG_CRASH_DUMP
141 if (MACHINE_IS_VM)
142 diag10_range(PFN_DOWN(crashk_res.start),
143 PFN_DOWN(crashk_res.end - crashk_res.start + 1));
144 return 0;
145#else
146 return -EINVAL;
147#endif
148}
149
150int machine_kexec_prepare(struct kimage *image)
151{
152 void *reboot_code_buffer;
@@ -35,6 +155,9 @@ int machine_kexec_prepare(struct kimage *image)
155 if (ipl_flags & IPL_NSS_VALID)
156 return -ENOSYS;
157
158 if (image->type == KEXEC_TYPE_CRASH)
159 return machine_kexec_prepare_kdump();
160
161 /* We don't support anything but the default image type for now. */
162 if (image->type != KEXEC_TYPE_DEFAULT)
163 return -EINVAL;
@@ -51,27 +174,53 @@ void machine_kexec_cleanup(struct kimage *image)
174{
175}
176
177void arch_crash_save_vmcoreinfo(void)
178{
179 VMCOREINFO_SYMBOL(lowcore_ptr);
180 VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
181}
182
183void machine_shutdown(void)
184{
185}
186
- static void __machine_kexec(void *data)
+ /*
188 * Do normal kexec
189 */
190static void __do_machine_kexec(void *data)
191{
192 relocate_kernel_t data_mover;
193 struct kimage *image = data;
194
- pfault_fini();
- s390_reset_system();
-
195 data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);
196
197 /* Call the moving routine */
198 (*data_mover)(&image->head, image->start);
- for (;;);
199}
200
201/*
202 * Reset system and call either kdump or normal kexec
203 */
204static void __machine_kexec(void *data)
205{
206 struct kimage *image = data;
207
208 pfault_fini();
209 if (image->type == KEXEC_TYPE_CRASH)
210 s390_reset_system(__do_machine_kdump, data);
211 else
212 s390_reset_system(__do_machine_kexec, data);
213 disabled_wait((unsigned long) __builtin_return_address(0));
214}
215
216/*
217 * Do either kdump or normal kexec. In case of kdump we first ask
218 * purgatory, if kdump checksums are valid.
219 */
220void machine_kexec(struct kimage *image)
221{
222 if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
223 return;
224 tracer_disable();
225 smp_send_stop();
226 smp_switch_to_ipl_cpu(__machine_kexec, image);
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c
index 0fbe4e32f7ba..19b4568f4cee 100644
--- a/arch/s390/kernel/mem_detect.c
+++ b/arch/s390/kernel/mem_detect.c
@@ -62,3 +62,72 @@ void detect_memory_layout(struct mem_chunk chunk[])
62 arch_local_irq_restore(flags);
63}
64EXPORT_SYMBOL(detect_memory_layout);
65
66/*
67 * Create memory hole with given address, size, and type
68 */
69void create_mem_hole(struct mem_chunk chunks[], unsigned long addr,
70 unsigned long size, int type)
71{
72 unsigned long start, end, new_size;
73 int i;
74
75 for (i = 0; i < MEMORY_CHUNKS; i++) {
76 if (chunks[i].size == 0)
77 continue;
78 if (addr + size < chunks[i].addr)
79 continue;
80 if (addr >= chunks[i].addr + chunks[i].size)
81 continue;
82 start = max(addr, chunks[i].addr);
83 end = min(addr + size, chunks[i].addr + chunks[i].size);
84 new_size = end - start;
85 if (new_size == 0)
86 continue;
87 if (start == chunks[i].addr &&
88 end == chunks[i].addr + chunks[i].size) {
89 /* Remove chunk */
90 chunks[i].type = type;
91 } else if (start == chunks[i].addr) {
92 /* Make chunk smaller at start */
93 if (i >= MEMORY_CHUNKS - 1)
94 panic("Unable to create memory hole");
95 memmove(&chunks[i + 1], &chunks[i],
96 sizeof(struct mem_chunk) *
97 (MEMORY_CHUNKS - (i + 1)));
98 chunks[i + 1].addr = chunks[i].addr + new_size;
99 chunks[i + 1].size = chunks[i].size - new_size;
100 chunks[i].size = new_size;
101 chunks[i].type = type;
102 i += 1;
103 } else if (end == chunks[i].addr + chunks[i].size) {
104 /* Make chunk smaller at end */
105 if (i >= MEMORY_CHUNKS - 1)
106 panic("Unable to create memory hole");
107 memmove(&chunks[i + 1], &chunks[i],
108 sizeof(struct mem_chunk) *
109 (MEMORY_CHUNKS - (i + 1)));
110 chunks[i + 1].addr = start;
111 chunks[i + 1].size = new_size;
112 chunks[i + 1].type = type;
113 chunks[i].size -= new_size;
114 i += 1;
115 } else {
116 /* Create memory hole */
117 if (i >= MEMORY_CHUNKS - 2)
118 panic("Unable to create memory hole");
119 memmove(&chunks[i + 2], &chunks[i],
120 sizeof(struct mem_chunk) *
121 (MEMORY_CHUNKS - (i + 2)));
122 chunks[i + 1].addr = addr;
123 chunks[i + 1].size = size;
124 chunks[i + 1].type = type;
125 chunks[i + 2].addr = addr + size;
126 chunks[i + 2].size =
127 chunks[i].addr + chunks[i].size - (addr + size);
128 chunks[i + 2].type = chunks[i].type;
129 chunks[i].size = addr - chunks[i].addr;
130 i += 2;
131 }
132 }
133}
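
A worked example of the splitting logic above, with purely illustrative values: starting from a single 256 MiB CHUNK_READ_WRITE chunk at address 0, punching a 128 MiB CHUNK_CRASHK hole at offset 64 MiB takes the final "create memory hole" branch and leaves three chunks:

/* before */
{ .addr = 0x00000000, .size = 0x10000000, .type = CHUNK_READ_WRITE }

/* after create_mem_hole(chunks, 0x04000000, 0x08000000, CHUNK_CRASHK) */
{ .addr = 0x00000000, .size = 0x04000000, .type = CHUNK_READ_WRITE }
{ .addr = 0x04000000, .size = 0x08000000, .type = CHUNK_CRASHK }
{ .addr = 0x0C000000, .size = 0x04000000, .type = CHUNK_READ_WRITE }
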
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 303d961c3bb5..ad67c214be04 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -10,6 +10,12 @@
10#include <asm/asm-offsets.h>
11
12#
13# store_status: Empty implementation until kdump is supported on 31 bit
14#
15ENTRY(store_status)
16 br %r14
17
18#
19# do_reipl_asm
20# Parameter: r2 = schid of reipl device
21#
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index e690975403f4..a0f5b686a3cd 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -62,8 +62,11 @@ ENTRY(store_status)
62 larl %r2,store_status
63 stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
64 br %r14
- .align 8
+
66 .section .bss
67 .align 8
68.Lclkcmp: .quad 0x0000000000000000
69 .previous
70
71#
72# do_reipl_asm
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7b371c37061d..b5a30412b2e5 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -42,6 +42,9 @@
42#include <linux/reboot.h>
43#include <linux/topology.h>
44#include <linux/ftrace.h>
45#include <linux/kexec.h>
46#include <linux/crash_dump.h>
47#include <linux/memory.h>
48
49#include <asm/ipl.h>
50#include <asm/uaccess.h>
@@ -57,6 +60,7 @@
60#include <asm/ebcdic.h>
61#include <asm/compat.h>
62#include <asm/kvm_virtio.h>
63#include <asm/diag.h>
64
65long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
66 PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
@@ -435,6 +439,9 @@ static void __init setup_resources(void)
439 for (i = 0; i < MEMORY_CHUNKS; i++) {
440 if (!memory_chunk[i].size)
441 continue;
442 if (memory_chunk[i].type == CHUNK_OLDMEM ||
443 memory_chunk[i].type == CHUNK_CRASHK)
444 continue;
445 res = alloc_bootmem_low(sizeof(*res));
446 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
447 switch (memory_chunk[i].type) {
@@ -479,6 +486,7 @@ static void __init setup_memory_end(void)
486 unsigned long max_mem;
487 int i;
488
489
490#ifdef CONFIG_ZFCPDUMP
491 if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
492 memory_end = ZFCPDUMP_HSA_SIZE;
@@ -550,6 +558,187 @@ static void __init setup_restart_psw(void)
558 copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
559}
560
561#ifdef CONFIG_CRASH_DUMP
562
563/*
564 * Find suitable location for crashkernel memory
565 */
566static unsigned long __init find_crash_base(unsigned long crash_size,
567 char **msg)
568{
569 unsigned long crash_base;
570 struct mem_chunk *chunk;
571 int i;
572
573 if (memory_chunk[0].size < crash_size) {
574 *msg = "first memory chunk must be at least crashkernel size";
575 return 0;
576 }
577 if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE))
578 return OLDMEM_BASE;
579
580 for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
581 chunk = &memory_chunk[i];
582 if (chunk->size == 0)
583 continue;
584 if (chunk->type != CHUNK_READ_WRITE)
585 continue;
586 if (chunk->size < crash_size)
587 continue;
588 crash_base = (chunk->addr + chunk->size) - crash_size;
589 if (crash_base < crash_size)
590 continue;
591 if (crash_base < ZFCPDUMP_HSA_SIZE_MAX)
592 continue;
593 if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE)
594 continue;
595 return crash_base;
596 }
597 *msg = "no suitable area found";
598 return 0;
599}
600
601/*
602 * Check if crash_base and crash_size is valid
603 */
604static int __init verify_crash_base(unsigned long crash_base,
605 unsigned long crash_size,
606 char **msg)
607{
608 struct mem_chunk *chunk;
609 int i;
610
611 /*
612 * Because we do the swap to zero, we must have at least 'crash_size'
613 * bytes free space before crash_base
614 */
615 if (crash_size > crash_base) {
616 *msg = "crashkernel offset must be greater than size";
617 return -EINVAL;
618 }
619
620 /* First memory chunk must be at least crash_size */
621 if (memory_chunk[0].size < crash_size) {
622 *msg = "first memory chunk must be at least crashkernel size";
623 return -EINVAL;
624 }
625 /* Check if we fit into the respective memory chunk */
626 for (i = 0; i < MEMORY_CHUNKS; i++) {
627 chunk = &memory_chunk[i];
628 if (chunk->size == 0)
629 continue;
630 if (crash_base < chunk->addr)
631 continue;
632 if (crash_base >= chunk->addr + chunk->size)
633 continue;
634 /* we have found the memory chunk */
635 if (crash_base + crash_size > chunk->addr + chunk->size) {
636 *msg = "selected memory chunk is too small for "
637 "crashkernel memory";
638 return -EINVAL;
639 }
640 return 0;
641 }
642 *msg = "invalid memory range specified";
643 return -EINVAL;
644}
645
646/*
647 * Reserve kdump memory by creating a memory hole in the mem_chunk array
648 */
649static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size,
650 int type)
651{
652
653 create_mem_hole(memory_chunk, addr, size, type);
654}
655
656/*
657 * When kdump is enabled, we have to ensure that no memory from
658 * the area [0 - crashkernel memory size] and
659 * [crashk_res.start - crashk_res.end] is set offline.
660 */
661static int kdump_mem_notifier(struct notifier_block *nb,
662 unsigned long action, void *data)
663{
664 struct memory_notify *arg = data;
665
666 if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
667 return NOTIFY_BAD;
668 if (arg->start_pfn > PFN_DOWN(crashk_res.end))
669 return NOTIFY_OK;
670 if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
671 return NOTIFY_OK;
672 return NOTIFY_BAD;
673}
674
675static struct notifier_block kdump_mem_nb = {
676 .notifier_call = kdump_mem_notifier,
677};
678
679#endif
680
681/*
682 * Make sure that oldmem, where the dump is stored, is protected
683 */
684static void reserve_oldmem(void)
685{
686#ifdef CONFIG_CRASH_DUMP
687 if (!OLDMEM_BASE)
688 return;
689
690 reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM);
691 reserve_kdump_bootmem(OLDMEM_SIZE, memory_end - OLDMEM_SIZE,
692 CHUNK_OLDMEM);
693 if (OLDMEM_BASE + OLDMEM_SIZE == real_memory_size)
694 saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1;
695 else
696 saved_max_pfn = PFN_DOWN(real_memory_size) - 1;
697#endif
698}
699
700/*
701 * Reserve memory for kdump kernel to be loaded with kexec
702 */
703static void __init reserve_crashkernel(void)
704{
705#ifdef CONFIG_CRASH_DUMP
706 unsigned long long crash_base, crash_size;
707 char *msg;
708 int rc;
709
710 rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
711 &crash_base);
712 if (rc || crash_size == 0)
713 return;
714 crash_base = PAGE_ALIGN(crash_base);
715 crash_size = PAGE_ALIGN(crash_size);
716 if (register_memory_notifier(&kdump_mem_nb))
717 return;
718 if (!crash_base)
719 crash_base = find_crash_base(crash_size, &msg);
720 if (!crash_base) {
721 pr_info("crashkernel reservation failed: %s\n", msg);
722 unregister_memory_notifier(&kdump_mem_nb);
723 return;
724 }
725 if (verify_crash_base(crash_base, crash_size, &msg)) {
726 pr_info("crashkernel reservation failed: %s\n", msg);
727 unregister_memory_notifier(&kdump_mem_nb);
728 return;
729 }
730 if (!OLDMEM_BASE && MACHINE_IS_VM)
731 diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
732 crashk_res.start = crash_base;
733 crashk_res.end = crash_base + crash_size - 1;
734 insert_resource(&iomem_resource, &crashk_res);
735 reserve_kdump_bootmem(crash_base, crash_size, CHUNK_READ_WRITE);
736 pr_info("Reserving %lluMB of memory at %lluMB "
737 "for crashkernel (System RAM: %luMB)\n",
738 crash_size >> 20, crash_base >> 20, memory_end >> 20);
739#endif
740}
741
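To make the flow above concrete, a purely illustrative example of what happens for "crashkernel=128M" on a 1 GiB guest (addresses assumed, not taken from the patch): parse_crashkernel() returns crash_size = 128 MiB and crash_base = 0, and find_crash_base() then picks the top of the highest suitable CHUNK_READ_WRITE chunk that lies above ZFCPDUMP_HSA_SIZE_MAX and the initrd:

/*
 *   crashk_res.start = 0x38000000;   // 896 MiB
 *   crashk_res.end   = 0x3fffffff;   // 1 GiB - 1
 *
 * The range is inserted into iomem_resource and re-chunked as
 * CHUNK_READ_WRITE via create_mem_hole(), so kexec-tools can later
 * load the kdump kernel into it (kexec -p).
 */
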
742static void __init
743setup_memory(void)
744{
@@ -580,6 +769,14 @@ setup_memory(void)
769 if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
770 start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
771
772#ifdef CONFIG_CRASH_DUMP
773 if (OLDMEM_BASE) {
774 /* Move initrd behind kdump oldmem */
775 if (start + INITRD_SIZE > OLDMEM_BASE &&
776 start < OLDMEM_BASE + OLDMEM_SIZE)
777 start = OLDMEM_BASE + OLDMEM_SIZE;
778 }
779#endif
780 if (start + INITRD_SIZE > memory_end) {
781 pr_err("initrd extends beyond end of "
782 "memory (0x%08lx > 0x%08lx) "
@@ -644,6 +841,15 @@ setup_memory(void)
841 reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
842 BOOTMEM_DEFAULT);
843
844#ifdef CONFIG_CRASH_DUMP
845 if (crashk_res.start)
846 reserve_bootmem(crashk_res.start,
847 crashk_res.end - crashk_res.start + 1,
848 BOOTMEM_DEFAULT);
849 if (is_kdump_kernel())
850 reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE,
851 PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT);
852#endif
853#ifdef CONFIG_BLK_DEV_INITRD
854 if (INITRD_START && INITRD_SIZE) {
855 if (INITRD_START + INITRD_SIZE <= memory_end) {
@@ -812,6 +1018,8 @@ setup_arch(char **cmdline_p)
1018 setup_ipl();
1019 setup_memory_end();
1020 setup_addressing_mode();
1021 reserve_oldmem();
1022 reserve_crashkernel();
1023 setup_memory();
1024 setup_resources();
1025 setup_restart_psw();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index e4572601e91e..e3f51dfa5cad 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -38,6 +38,7 @@
38#include <linux/timex.h>
39#include <linux/bootmem.h>
40#include <linux/slab.h>
41#include <linux/crash_dump.h>
42#include <asm/asm-offsets.h>
43#include <asm/ipl.h>
44#include <asm/setup.h>
@@ -304,11 +305,13 @@ void smp_ctl_clear_bit(int cr, int bit)
305}
306EXPORT_SYMBOL(smp_ctl_clear_bit);
307
- #ifdef CONFIG_ZFCPDUMP
+ #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)
309
310static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
311{
- if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+ if (ipl_info.type != IPL_TYPE_FCP_DUMP && !OLDMEM_BASE)
313 return;
314 if (is_kdump_kernel())
315 return;
316 if (cpu >= NR_CPUS) {
317 pr_warning("CPU %i exceeds the maximum %i and is excluded from "
@@ -426,6 +429,18 @@ static void __init smp_detect_cpus(void)
429 info = kmalloc(sizeof(*info), GFP_KERNEL);
430 if (!info)
431 panic("smp_detect_cpus failed to allocate memory\n");
432#ifdef CONFIG_CRASH_DUMP
433 if (OLDMEM_BASE && !is_kdump_kernel()) {
434 struct save_area *save_area;
435
436 save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
437 if (!save_area)
438 panic("could not allocate memory for save area\n");
439 copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
440 0x200, 0);
441 zfcpdump_save_areas[0] = save_area;
442 }
443#endif
444 /* Use sigp detection algorithm if sclp doesn't work. */
445 if (sclp_get_cpu_info(info)) {
446 smp_use_sigp_detection = 1;
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 781ff5169560..4799383e2df9 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -335,6 +335,9 @@ void __init vmem_map_init(void)
335 ro_start = ((unsigned long)&_stext) & PAGE_MASK;
336 ro_end = PFN_ALIGN((unsigned long)&_eshared);
337 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
338 if (memory_chunk[i].type == CHUNK_CRASHK ||
339 memory_chunk[i].type == CHUNK_OLDMEM)
340 continue;
341 start = memory_chunk[i].addr;
342 end = memory_chunk[i].addr + memory_chunk[i].size;
343 if (start >= ro_end || end <= ro_start)
@@ -368,6 +371,9 @@ static int __init vmem_convert_memory_chunk(void)
371 for (i = 0; i < MEMORY_CHUNKS; i++) {
372 if (!memory_chunk[i].size)
373 continue;
374 if (memory_chunk[i].type == CHUNK_CRASHK ||
375 memory_chunk[i].type == CHUNK_OLDMEM)
376 continue;
377 seg = kzalloc(sizeof(*seg), GFP_KERNEL);
378 if (!seg)
379 panic("Out of memory...\n");
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 5586c1376cb0..dc67c397449e 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -1069,7 +1069,7 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
1069{
1070 struct subchannel_id schid;
1071
- s390_reset_system();
+ s390_reset_system(NULL, NULL);
1073 if (reipl_find_schid(devid, &schid) != 0)
1074 panic("IPL Device not found\n");
1075 do_reipl_asm(*((__u32*)&schid));