 fs/proc/vmcore.c | 136 ++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 116 insertions(+), 20 deletions(-)
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 8ec648368985..28503172f2e4 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/crash_dump.h>
 #include <linux/list.h>
+#include <linux/vmalloc.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include "internal.h"
@@ -194,9 +195,122 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
 	return acc;
 }
 
+/**
+ * alloc_elfnotes_buf - allocate buffer for ELF note segment in
+ *                      vmalloc memory
+ *
+ * @notes_sz: size of buffer
+ *
+ * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
+ * the buffer to user-space by means of remap_vmalloc_range().
+ *
+ * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
+ * disabled and there's no need to allow users to mmap the buffer.
+ */
+static inline char *alloc_elfnotes_buf(size_t notes_sz)
+{
+#ifdef CONFIG_MMU
+	return vmalloc_user(notes_sz);
+#else
+	return vzalloc(notes_sz);
+#endif
+}
+
+/*
+ * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
+ * essential for mmap_vmcore() in order to map physically
+ * non-contiguous objects (ELF header, ELF note segment and memory
+ * regions in the 1st kernel pointed to by PT_LOAD entries) into
+ * virtually contiguous user-space in ELF layout.
+ */
+#ifdef CONFIG_MMU
+static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
+{
+	size_t size = vma->vm_end - vma->vm_start;
+	u64 start, end, len, tsz;
+	struct vmcore *m;
+
+	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
+	end = start + size;
+
+	if (size > vmcore_size || end > vmcore_size)
+		return -EINVAL;
+
+	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
+		return -EPERM;
+
+	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
+	vma->vm_flags |= VM_MIXEDMAP;
+
+	len = 0;
+
+	if (start < elfcorebuf_sz) {
+		u64 pfn;
+
+		tsz = min(elfcorebuf_sz - (size_t)start, size);
+		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
+		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
+				    vma->vm_page_prot))
+			return -EAGAIN;
+		size -= tsz;
+		start += tsz;
+		len += tsz;
+
+		if (size == 0)
+			return 0;
+	}
+
+	if (start < elfcorebuf_sz + elfnotes_sz) {
+		void *kaddr;
+
+		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
+		kaddr = elfnotes_buf + start - elfcorebuf_sz;
+		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
+						kaddr, tsz))
+			goto fail;
+		size -= tsz;
+		start += tsz;
+		len += tsz;
+
+		if (size == 0)
+			return 0;
+	}
+
+	list_for_each_entry(m, &vmcore_list, list) {
+		if (start < m->offset + m->size) {
+			u64 paddr = 0;
+
+			tsz = min_t(size_t, m->offset + m->size - start, size);
+			paddr = m->paddr + start - m->offset;
+			if (remap_pfn_range(vma, vma->vm_start + len,
+					    paddr >> PAGE_SHIFT, tsz,
+					    vma->vm_page_prot))
+				goto fail;
+			size -= tsz;
+			start += tsz;
+			len += tsz;
+
+			if (size == 0)
+				return 0;
+		}
+	}
+
+	return 0;
+fail:
+	do_munmap(vma->vm_mm, vma->vm_start, len);
+	return -EAGAIN;
+}
+#else
+static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
+{
+	return -ENOSYS;
+}
+#endif
+
 static const struct file_operations proc_vmcore_operations = {
 	.read		= read_vmcore,
 	.llseek		= default_llseek,
+	.mmap		= mmap_vmcore,
 };
 
 static struct vmcore* __init get_new_element(void)
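
For illustration, a minimal user-space sketch (not part of the patch) of how a dump tool could consume the new interface: map the first page of /proc/vmcore read-only and validate the ELF magic. PROT_READ is mandatory because mmap_vmcore() above rejects VM_WRITE and VM_EXEC, and the file offset must be page-aligned since it is translated through vma->vm_pgoff.

#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	int fd = open("/proc/vmcore", O_RDONLY);

	if (fd < 0) {
		perror("open /proc/vmcore");
		return 1;
	}
	/* PROT_READ only: mmap_vmcore() returns -EPERM for VM_WRITE/VM_EXEC. */
	void *p = mmap(NULL, pagesize, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	if (memcmp(p, ELFMAG, SELFMAG) == 0)
		printf("ELF header mapped, class %d\n",
		       ((unsigned char *)p)[EI_CLASS]);
	munmap(p, pagesize);
	close(fd);
	return 0;
}
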
@@ -348,7 +462,6 @@ static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
 	Elf64_Ehdr *ehdr_ptr;
 	Elf64_Phdr phdr;
 	u64 phdr_sz = 0, note_off;
-	struct vm_struct *vm;
 
 	ehdr_ptr = (Elf64_Ehdr *)elfptr;
 
@@ -361,18 +474,10 @@ static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
 		return rc;
 
 	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
-	*notes_buf = vzalloc(*notes_sz);
+	*notes_buf = alloc_elfnotes_buf(*notes_sz);
 	if (!*notes_buf)
 		return -ENOMEM;
 
-	/*
-	 * Allow users to remap ELF note segment buffer on vmalloc memory using
-	 * remap_vmalloc_range.()
-	 */
-	vm = find_vm_area(*notes_buf);
-	BUG_ON(!vm);
-	vm->flags |= VM_USERMAP;
-
 	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
 	if (rc < 0)
 		return rc;
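
Why the deleted block above is safe to drop: vmalloc_user() returns zeroed memory whose backing vm_struct is already marked VM_USERMAP, so the open-coded find_vm_area()/BUG_ON()/flag fixup becomes redundant on MMU kernels. A hedged kernel-style sketch of the underlying pattern (example_mmap is a hypothetical handler, not code from this patch):

/* Assumes notes_buf was allocated with vmalloc_user(). */
static void *notes_buf;

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/*
	 * remap_vmalloc_range() rejects buffers that lack VM_USERMAP;
	 * vmalloc_user() sets that flag at allocation time, which is
	 * exactly what alloc_elfnotes_buf() now relies on.
	 */
	return remap_vmalloc_range(vma, notes_buf, vma->vm_pgoff);
}
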
@@ -536,7 +641,6 @@ static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
 	Elf32_Ehdr *ehdr_ptr;
 	Elf32_Phdr phdr;
 	u64 phdr_sz = 0, note_off;
-	struct vm_struct *vm;
 
 	ehdr_ptr = (Elf32_Ehdr *)elfptr;
 
@@ -549,18 +653,10 @@ static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
 		return rc;
 
 	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
-	*notes_buf = vzalloc(*notes_sz);
+	*notes_buf = alloc_elfnotes_buf(*notes_sz);
 	if (!*notes_buf)
 		return -ENOMEM;
 
-	/*
-	 * Allow users to remap ELF note segment buffer on vmalloc memory using
-	 * remap_vmalloc_range()
-	 */
-	vm = find_vm_area(*notes_buf);
-	BUG_ON(!vm);
-	vm->flags |= VM_USERMAP;
-
 	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
 	if (rc < 0)
 		return rc;
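
As a hedged follow-up to the user-space sketch after the first hunk (again illustrative, not part of the patch), a tool can walk the merged program headers and mmap each PT_LOAD segment at its file offset, mirroring the vmcore_list walk in mmap_vmcore(). The sketch assumes a 64-bit dump and relies on the offsets in the reconstructed ELF layout being page-aligned:

#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	Elf64_Ehdr eh;
	int fd = open("/proc/vmcore", O_RDONLY);

	if (fd < 0 || pread(fd, &eh, sizeof(eh), 0) != sizeof(eh))
		return 1;

	for (int i = 0; i < eh.e_phnum; i++) {
		Elf64_Phdr ph;
		off_t off = eh.e_phoff + (off_t)i * eh.e_phentsize;

		if (pread(fd, &ph, sizeof(ph), off) != sizeof(ph))
			break;
		if (ph.p_type != PT_LOAD || ph.p_filesz == 0)
			continue;
		/* p_offset is page-aligned in the mmap-capable layout,
		 * so it can be handed to mmap() directly. */
		void *seg = mmap(NULL, ph.p_filesz, PROT_READ, MAP_SHARED,
				 fd, ph.p_offset);
		if (seg == MAP_FAILED)
			continue;
		printf("PT_LOAD %d: paddr 0x%llx, %llu bytes\n", i,
		       (unsigned long long)ph.p_paddr,
		       (unsigned long long)ph.p_filesz);
		munmap(seg, ph.p_filesz);
	}
	close(fd);
	return 0;
}
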