Diffstat (limited to 'arch/mips/kernel/crash_dump.c')
-rw-r--r-- | arch/mips/kernel/crash_dump.c | 77
1 file changed, 77 insertions, 0 deletions
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
new file mode 100644
index 000000000000..d9ec3898f9fa
--- /dev/null
+++ b/arch/mips/kernel/crash_dump.c
@@ -0,0 +1,77 @@
+#include <linux/highmem.h>
+#include <linux/bootmem.h>
+#include <linux/crash_dump.h>
+#include <asm/uaccess.h>
+
+unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
+
+static int __init parse_savemaxmem(char *p)
+{
+	if (p)
+		saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;
+
+	return 1;
+}
+__setup("savemaxmem=", parse_savemaxmem);
+
+
+static void *kdump_buf_page;
+
+/**
+ * copy_oldmem_page - copy one page from "oldmem"
+ * @pfn: page frame number to be copied
+ * @buf: target memory address for the copy; this can be in kernel address
+ *	space or user address space (see @userbuf)
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page (based on pfn) to begin the copy
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ *	otherwise @buf is in kernel address space, use memcpy().
+ *
+ * Copy a page from "oldmem". For this page, there is no pte mapped
+ * in the current kernel.
+ *
+ * Calling copy_to_user() in atomic context is not desirable. Hence first
+ * copying the data to a pre-allocated kernel page and then copying to user
+ * space in non-atomic context.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+			 size_t csize, unsigned long offset, int userbuf)
+{
+	void *vaddr;
+
+	if (!csize)
+		return 0;
+
+	vaddr = kmap_atomic_pfn(pfn);
+
+	if (!userbuf) {
+		memcpy(buf, (vaddr + offset), csize);
+		kunmap_atomic(vaddr);
+	} else {
+		if (!kdump_buf_page) {
+			pr_warning("Kdump: Kdump buffer page not allocated\n");
+
+			return -EFAULT;
+		}
+		copy_page(kdump_buf_page, vaddr);
+		kunmap_atomic(vaddr);
+		if (copy_to_user(buf, (kdump_buf_page + offset), csize))
+			return -EFAULT;
+	}
+
+	return csize;
+}
+
+static int __init kdump_buf_page_init(void)
+{
+	int ret = 0;
+
+	kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!kdump_buf_page) {
+		pr_warning("Kdump: Failed to allocate kdump buffer page\n");
+		ret = -ENOMEM;
+	}
+
+	return ret;
+}
+arch_initcall(kdump_buf_page_init);
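
For context, a vmcore read path drives copy_oldmem_page() one page at a time. Below is a minimal sketch of such a caller, assuming a hypothetical read_oldmem_range() helper; the name and signature are illustrative only and are not part of this commit.

/*
 * Illustrative only: walk a byte range of old memory page by page and
 * hand each pfn/offset/length triple to copy_oldmem_page() above.
 * read_oldmem_range() is a hypothetical name, not an interface added
 * by this commit.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/crash_dump.h>

static ssize_t read_oldmem_range(char *buf, size_t count, u64 pos, int userbuf)
{
	ssize_t total = 0;

	while (count) {
		unsigned long pfn = (unsigned long)(pos >> PAGE_SHIFT);
		unsigned long offset = (unsigned long)(pos & (PAGE_SIZE - 1));
		size_t chunk = min_t(size_t, count, PAGE_SIZE - offset);
		ssize_t copied;

		/* copy_oldmem_page() returns bytes copied or -EFAULT */
		copied = copy_oldmem_page(pfn, buf, chunk, offset, userbuf);
		if (copied < 0)
			return copied;

		buf += copied;
		pos += copied;
		count -= copied;
		total += copied;
	}

	return total;
}

The design choice visible in the diff: in the !userbuf case the data is copied directly while the page is mapped with kmap_atomic_pfn(), whereas for user buffers the page is first staged in the pre-allocated kdump_buf_page so that copy_to_user(), which may fault and sleep, runs only after kunmap_atomic() has left atomic context.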