-rw-r--r--   fs/Kconfig                     6
-rw-r--r--   fs/proc/Makefile               1
-rw-r--r--   fs/proc/proc_misc.c            6
-rw-r--r--   fs/proc/vmcore.c             451
-rw-r--r--   include/linux/crash_dump.h     4
-rw-r--r--   include/linux/proc_fs.h        7
-rw-r--r--   kernel/crash_dump.c            2
7 files changed, 476 insertions, 1 deletions
diff --git a/fs/Kconfig b/fs/Kconfig
index 8157f2e2d515..062177956239 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -734,6 +734,12 @@ config PROC_KCORE
 	bool "/proc/kcore support" if !ARM
 	depends on PROC_FS && MMU
 
+config PROC_VMCORE
+	bool "/proc/vmcore support (EXPERIMENTAL)"
+	depends on PROC_FS && EMBEDDED && EXPERIMENTAL && CRASH_DUMP
+	help
+	Exports the dump image of crashed kernel in ELF format.
+
 config SYSFS
 	bool "sysfs file system support" if EMBEDDED
 	default y
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index 738b9b602932..7431d7ba2d09 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -11,4 +11,5 @@ proc-y += inode.o root.o base.o generic.o array.o \
 		kmsg.o proc_tty.o proc_misc.o
 
 proc-$(CONFIG_PROC_KCORE)	+= kcore.o
+proc-$(CONFIG_PROC_VMCORE)	+= vmcore.o
 proc-$(CONFIG_PROC_DEVICETREE) += proc_devtree.o
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 94b570ad037d..a3453555a94e 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -44,6 +44,7 @@
 #include <linux/jiffies.h>
 #include <linux/sysrq.h>
 #include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/io.h>
@@ -618,6 +619,11 @@ void __init proc_misc_init(void)
 			(size_t)high_memory - PAGE_OFFSET + PAGE_SIZE;
 	}
 #endif
+#ifdef CONFIG_PROC_VMCORE
+	proc_vmcore = create_proc_entry("vmcore", S_IRUSR, NULL);
+	if (proc_vmcore)
+		proc_vmcore->proc_fops = &proc_vmcore_operations;
+#endif
 #ifdef CONFIG_MAGIC_SYSRQ
 	entry = create_proc_entry("sysrq-trigger", S_IWUSR, NULL);
 	if (entry)
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
new file mode 100644
index 000000000000..8ad467855845
--- /dev/null
+++ b/fs/proc/vmcore.c
@@ -0,0 +1,451 @@
+/*
+ * fs/proc/vmcore.c Interface for accessing the crash
+ *                  dump from the system's previous life.
+ * Heavily borrowed from fs/proc/kcore.c
+ * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
+ * Copyright (C) IBM Corporation, 2004. All rights reserved
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/elf.h>
+#include <linux/elfcore.h>
+#include <linux/proc_fs.h>
+#include <linux/highmem.h>
+#include <linux/bootmem.h>
+#include <linux/init.h>
+#include <linux/crash_dump.h>
+#include <linux/list.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+/* List representing chunks of contiguous memory areas and their offsets in
+ * vmcore file.
+ */
+static LIST_HEAD(vmcore_list);
+
+/* Stores the pointer to the buffer containing kernel elf core headers. */
+static char *elfcorebuf;
+static size_t elfcorebuf_sz;
+
+/* Total size of vmcore file. */
+static u64 vmcore_size;
+
+struct proc_dir_entry *proc_vmcore = NULL;
+
+/* Reads a page from the oldmem device from given offset. */
+static ssize_t read_from_oldmem(char *buf, size_t count,
+				u64 *ppos, int userbuf)
+{
+	unsigned long pfn, offset;
+	size_t nr_bytes;
+	ssize_t read = 0, tmp;
+
+	if (!count)
+		return 0;
+
+	offset = (unsigned long)(*ppos % PAGE_SIZE);
+	pfn = (unsigned long)(*ppos / PAGE_SIZE);
+	if (pfn > saved_max_pfn)
+		return -EINVAL;
+
+	do {
+		if (count > (PAGE_SIZE - offset))
+			nr_bytes = PAGE_SIZE - offset;
+		else
+			nr_bytes = count;
+
+		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
+		if (tmp < 0)
+			return tmp;
+		*ppos += nr_bytes;
+		count -= nr_bytes;
+		buf += nr_bytes;
+		read += nr_bytes;
+		++pfn;
+		offset = 0;
+	} while (count);
+
+	return read;
+}
+
+/* Maps vmcore file offset to respective physical address in memory. */
+static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
+						struct vmcore **m_ptr)
+{
+	struct vmcore *m;
+	u64 paddr;
+
+	list_for_each_entry(m, vc_list, list) {
+		u64 start, end;
+		start = m->offset;
+		end = m->offset + m->size - 1;
+		if (offset >= start && offset <= end) {
+			paddr = m->paddr + offset - start;
+			*m_ptr = m;
+			return paddr;
+		}
+	}
+	*m_ptr = NULL;
+	return 0;
+}
+
+/* Read from the ELF header and then the crash dump. On error, a negative
+ * value is returned; otherwise the number of bytes read is returned.
+ */
+static ssize_t read_vmcore(struct file *file, char __user *buffer,
+				size_t buflen, loff_t *fpos)
+{
+	ssize_t acc = 0, tmp;
+	size_t tsz, nr_bytes;
+	u64 start;
+	struct vmcore *curr_m = NULL;
+
+	if (buflen == 0 || *fpos >= vmcore_size)
+		return 0;
+
+	/* trim buflen to not go beyond EOF */
+	if (buflen > vmcore_size - *fpos)
+		buflen = vmcore_size - *fpos;
+
+	/* Read ELF core header */
+	if (*fpos < elfcorebuf_sz) {
+		tsz = elfcorebuf_sz - *fpos;
+		if (buflen < tsz)
+			tsz = buflen;
+		if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
+			return -EFAULT;
+		buflen -= tsz;
+		*fpos += tsz;
+		buffer += tsz;
+		acc += tsz;
+
+		/* leave now if filled buffer already */
+		if (buflen == 0)
+			return acc;
+	}
+
+	start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
+	if (!curr_m)
+		return -EINVAL;
+	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
+		tsz = buflen;
+
+	/* Calculate left bytes in current memory segment. */
+	nr_bytes = (curr_m->size - (start - curr_m->paddr));
+	if (tsz > nr_bytes)
+		tsz = nr_bytes;
+
+	while (buflen) {
+		tmp = read_from_oldmem(buffer, tsz, &start, 1);
+		if (tmp < 0)
+			return tmp;
+		buflen -= tsz;
+		*fpos += tsz;
+		buffer += tsz;
+		acc += tsz;
+		if (start >= (curr_m->paddr + curr_m->size)) {
+			if (curr_m->list.next == &vmcore_list)
+				return acc;	/*EOF*/
+			curr_m = list_entry(curr_m->list.next,
+						struct vmcore, list);
+			start = curr_m->paddr;
+		}
+		if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
+			tsz = buflen;
+		/* Calculate left bytes in current memory segment. */
+		nr_bytes = (curr_m->size - (start - curr_m->paddr));
+		if (tsz > nr_bytes)
+			tsz = nr_bytes;
+	}
+	return acc;
+}
+
+static int open_vmcore(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+struct file_operations proc_vmcore_operations = {
+	.read	= read_vmcore,
+	.open	= open_vmcore,
+};
+
+static struct vmcore* __init get_new_element(void)
+{
+	struct vmcore *p;
+
+	p = kmalloc(sizeof(*p), GFP_KERNEL);
+	if (p)
+		memset(p, 0, sizeof(*p));
+	return p;
+}
+
+static u64 __init get_vmcore_size_elf64(char *elfptr)
+{
+	int i;
+	u64 size;
+	Elf64_Ehdr *ehdr_ptr;
+	Elf64_Phdr *phdr_ptr;
+
+	ehdr_ptr = (Elf64_Ehdr *)elfptr;
+	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr));
+	size = sizeof(Elf64_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr));
+	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
+		size += phdr_ptr->p_memsz;
+		phdr_ptr++;
+	}
+	return size;
+}
+
+/* Merges all the PT_NOTE headers into one. */
+static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
+						struct list_head *vc_list)
+{
+	int i, nr_ptnote=0, rc=0;
+	char *tmp;
+	Elf64_Ehdr *ehdr_ptr;
+	Elf64_Phdr phdr, *phdr_ptr;
+	Elf64_Nhdr *nhdr_ptr;
+	u64 phdr_sz = 0, note_off;
+
+	ehdr_ptr = (Elf64_Ehdr *)elfptr;
+	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr));
+	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
+		int j;
+		void *notes_section;
+		struct vmcore *new;
+		u64 offset, max_sz, sz, real_sz = 0;
+		if (phdr_ptr->p_type != PT_NOTE)
+			continue;
+		nr_ptnote++;
+		max_sz = phdr_ptr->p_memsz;
+		offset = phdr_ptr->p_offset;
+		notes_section = kmalloc(max_sz, GFP_KERNEL);
+		if (!notes_section)
+			return -ENOMEM;
+		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
+		if (rc < 0) {
+			kfree(notes_section);
+			return rc;
+		}
+		nhdr_ptr = notes_section;
+		for (j = 0; j < max_sz; j += sz) {
+			if (nhdr_ptr->n_namesz == 0)
+				break;
+			sz = sizeof(Elf64_Nhdr) +
+				((nhdr_ptr->n_namesz + 3) & ~3) +
+				((nhdr_ptr->n_descsz + 3) & ~3);
+			real_sz += sz;
+			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
+		}
+
+		/* Add this contiguous chunk of notes section to vmcore list.*/
+		new = get_new_element();
+		if (!new) {
+			kfree(notes_section);
+			return -ENOMEM;
+		}
+		new->paddr = phdr_ptr->p_offset;
+		new->size = real_sz;
+		list_add_tail(&new->list, vc_list);
+		phdr_sz += real_sz;
+		kfree(notes_section);
+	}
+
+	/* Prepare merged PT_NOTE program header. */
+	phdr.p_type = PT_NOTE;
+	phdr.p_flags = 0;
+	note_off = sizeof(Elf64_Ehdr) +
+			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
+	phdr.p_offset = note_off;
+	phdr.p_vaddr = phdr.p_paddr = 0;
+	phdr.p_filesz = phdr.p_memsz = phdr_sz;
+	phdr.p_align = 0;
+
+	/* Add merged PT_NOTE program header*/
+	tmp = elfptr + sizeof(Elf64_Ehdr);
+	memcpy(tmp, &phdr, sizeof(phdr));
+	tmp += sizeof(phdr);
+
+	/* Remove unwanted PT_NOTE program headers. */
+	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
+	*elfsz = *elfsz - i;
+	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
+
+	/* Modify e_phnum to reflect merged headers. */
+	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
+
+	return 0;
+}
+
+/* Add memory chunks represented by program headers to vmcore list. Also update
+ * the new offset fields of exported program headers. */
+static int __init process_ptload_program_headers_elf64(char *elfptr,
+						size_t elfsz,
+						struct list_head *vc_list)
+{
+	int i;
+	Elf64_Ehdr *ehdr_ptr;
+	Elf64_Phdr *phdr_ptr;
+	loff_t vmcore_off;
+	struct vmcore *new;
+
+	ehdr_ptr = (Elf64_Ehdr *)elfptr;
+	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */
+
+	/* First program header is PT_NOTE header. */
+	vmcore_off = sizeof(Elf64_Ehdr) +
+			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr) +
+			phdr_ptr->p_memsz; /* Note sections */
+
+	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
+		if (phdr_ptr->p_type != PT_LOAD)
+			continue;
+
+		/* Add this contiguous chunk of memory to vmcore list.*/
+		new = get_new_element();
+		if (!new)
+			return -ENOMEM;
+		new->paddr = phdr_ptr->p_offset;
+		new->size = phdr_ptr->p_memsz;
+		list_add_tail(&new->list, vc_list);
+
+		/* Update the program header offset. */
+		phdr_ptr->p_offset = vmcore_off;
+		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
+	}
+	return 0;
+}
+
+/* Sets offset fields of vmcore elements. */
+static void __init set_vmcore_list_offsets_elf64(char *elfptr,
+						struct list_head *vc_list)
+{
+	loff_t vmcore_off;
+	Elf64_Ehdr *ehdr_ptr;
+	struct vmcore *m;
+
+	ehdr_ptr = (Elf64_Ehdr *)elfptr;
+
+	/* Skip Elf header and program headers. */
+	vmcore_off = sizeof(Elf64_Ehdr) +
+			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr);
+
+	list_for_each_entry(m, vc_list, list) {
+		m->offset = vmcore_off;
+		vmcore_off += m->size;
+	}
+}
+
+static int __init parse_crash_elf64_headers(void)
+{
+	int rc=0;
+	Elf64_Ehdr ehdr;
+	u64 addr;
+
+	addr = elfcorehdr_addr;
+
+	/* Read Elf header */
+	rc = read_from_oldmem((char*)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
+	if (rc < 0)
+		return rc;
+
+	/* Do some basic verification. */
+	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
+		(ehdr.e_type != ET_CORE) ||
+		!elf_check_arch(&ehdr) ||
+		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
+		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
+		ehdr.e_version != EV_CURRENT ||
+		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
+		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
+		ehdr.e_phnum == 0) {
+		printk(KERN_WARNING "Warning: Core image elf header is not"
+					" sane\n");
+		return -EINVAL;
+	}
+
+	/* Read in all elf headers. */
+	elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
+	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
+	if (!elfcorebuf)
+		return -ENOMEM;
+	addr = elfcorehdr_addr;
+	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
+	if (rc < 0) {
+		kfree(elfcorebuf);
+		return rc;
+	}
+
+	/* Merge all PT_NOTE headers into one. */
+	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
+	if (rc) {
+		kfree(elfcorebuf);
+		return rc;
+	}
+	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
+							&vmcore_list);
+	if (rc) {
+		kfree(elfcorebuf);
+		return rc;
+	}
+	set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
+	return 0;
+}
+
+static int __init parse_crash_elf_headers(void)
+{
+	unsigned char e_ident[EI_NIDENT];
+	u64 addr;
+	int rc=0;
+
+	addr = elfcorehdr_addr;
+	rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
+	if (rc < 0)
+		return rc;
+	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
+		printk(KERN_WARNING "Warning: Core image elf header"
+					" not found\n");
+		return -EINVAL;
+	}
+
+	if (e_ident[EI_CLASS] == ELFCLASS64) {
+		rc = parse_crash_elf64_headers();
+		if (rc)
+			return rc;
+
+		/* Determine vmcore size. */
+		vmcore_size = get_vmcore_size_elf64(elfcorebuf);
+	} else {
+		printk(KERN_WARNING "Warning: Core image elf header is not"
+					" sane\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Init function for vmcore module. */
+static int __init vmcore_init(void)
+{
+	int rc = 0;
+
+	/* If elfcorehdr= has been passed in cmdline, then capture the dump.*/
+	if (!(elfcorehdr_addr < ELFCORE_ADDR_MAX))
+		return rc;
+	rc = parse_crash_elf_headers();
+	if (rc) {
+		printk(KERN_WARNING "Kdump: vmcore not initialized\n");
+		return rc;
+	}
+
+	/* Initialize /proc/vmcore size if proc is already up. */
+	if (proc_vmcore)
+		proc_vmcore->size = vmcore_size;
+	return 0;
+}
+module_init(vmcore_init)
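
For reference, the note-merging code above sizes each ELF64 note record as the Elf64_Nhdr followed by its name and descriptor, each rounded up to a 4-byte boundary. The standalone userspace sketch below (illustrative only, not part of the patch; the helper names roundup4() and note_entry_size() are made up for the example) reproduces the same "(x + 3) & ~3" arithmetic used in merge_note_headers_elf64():

/* Sketch: size one ELF64 note the same way merge_note_headers_elf64() does. */
#include <elf.h>
#include <stdio.h>

/* Round x up to the next multiple of 4, as the kernel loop does. */
static unsigned long roundup4(unsigned long x)
{
	return (x + 3) & ~3UL;
}

static unsigned long note_entry_size(const Elf64_Nhdr *nhdr)
{
	return sizeof(Elf64_Nhdr) +
		roundup4(nhdr->n_namesz) +
		roundup4(nhdr->n_descsz);
}

int main(void)
{
	/* Example: a "CORE\0" NT_PRSTATUS note with a 336-byte descriptor. */
	Elf64_Nhdr nhdr = { .n_namesz = 5, .n_descsz = 336, .n_type = NT_PRSTATUS };

	/* 12-byte header + 8 (name padded to 4) + 336 = 356 bytes. */
	printf("note entry size: %lu\n", note_entry_size(&nhdr));
	return 0;
}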
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 3f25fd1eaa4b..534d750d922d 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -7,8 +7,12 @@
 #include <linux/device.h>
 #include <linux/proc_fs.h>
 
+#define ELFCORE_ADDR_MAX	(-1ULL)
 extern unsigned long long elfcorehdr_addr;
 extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
 						unsigned long, int);
+extern struct file_operations proc_vmcore_operations;
+extern struct proc_dir_entry *proc_vmcore;
+
 #endif /* CONFIG_CRASH_DUMP */
 #endif /* LINUX_CRASHDUMP_H */
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 59e505261fd6..0563581e3a02 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -74,6 +74,13 @@ struct kcore_list {
 	size_t size;
 };
 
+struct vmcore {
+	struct list_head list;
+	unsigned long long paddr;
+	unsigned long size;
+	loff_t offset;
+};
+
 #ifdef CONFIG_PROC_FS
 
 extern struct proc_dir_entry proc_root;
diff --git a/kernel/crash_dump.c b/kernel/crash_dump.c
index 10b966c3744c..459ba49e376a 100644
--- a/kernel/crash_dump.c
+++ b/kernel/crash_dump.c
@@ -16,7 +16,7 @@
 #include <asm/uaccess.h>
 
 /* Stores the physical address of elf header of crash image. */
-unsigned long long elfcorehdr_addr;
+unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
 
 /*
  * Copy a page from "oldmem". For this page, there is no pte mapped
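
Once the patch is applied and a capture kernel has been booted with elfcorehdr= on its command line, the dump is read like an ordinary file. A minimal userspace sketch (illustrative only, not part of the patch) that opens /proc/vmcore and checks the exported ELF64 core header:

/* Sketch: validate the ELF header that /proc/vmcore exports after a crash. */
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	Elf64_Ehdr ehdr;
	int fd = open("/proc/vmcore", O_RDONLY);

	if (fd < 0) {
		perror("open /proc/vmcore");
		return 1;
	}
	if (read(fd, &ehdr, sizeof(ehdr)) != sizeof(ehdr)) {
		perror("read");
		close(fd);
		return 1;
	}
	close(fd);

	/* Mirrors the checks parse_crash_elf64_headers() applies in-kernel. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    ehdr.e_type != ET_CORE ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS64) {
		fprintf(stderr, "not a 64-bit ELF core dump\n");
		return 1;
	}
	printf("/proc/vmcore: ELF64 core, %u program headers\n", ehdr.e_phnum);
	return 0;
}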