-rw-r--r--  fs/proc/vmcore.c  119
1 file changed, 51 insertions(+), 68 deletions(-)
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index ab0c92e64411..0b1c04e5e2c5 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -32,6 +32,7 @@ static LIST_HEAD(vmcore_list);
 /* Stores the pointer to the buffer containing kernel elf core headers. */
 static char *elfcorebuf;
 static size_t elfcorebuf_sz;
+static size_t elfcorebuf_sz_orig;
 
 /* Total size of vmcore file. */
 static u64 vmcore_size;
@@ -186,7 +187,7 @@ static struct vmcore* __init get_new_element(void)
 	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
 }
 
-static u64 __init get_vmcore_size_elf64(char *elfptr)
+static u64 __init get_vmcore_size_elf64(char *elfptr, size_t elfsz)
 {
 	int i;
 	u64 size;
@@ -195,7 +196,7 @@ static u64 __init get_vmcore_size_elf64(char *elfptr)
 
 	ehdr_ptr = (Elf64_Ehdr *)elfptr;
 	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr));
-	size = sizeof(Elf64_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr));
+	size = elfsz;
 	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
 		size += phdr_ptr->p_memsz;
 		phdr_ptr++;
@@ -203,7 +204,7 @@ static u64 __init get_vmcore_size_elf64(char *elfptr)
 	return size;
 }
 
-static u64 __init get_vmcore_size_elf32(char *elfptr)
+static u64 __init get_vmcore_size_elf32(char *elfptr, size_t elfsz)
 {
 	int i;
 	u64 size;
@@ -212,7 +213,7 @@ static u64 __init get_vmcore_size_elf32(char *elfptr)
 
 	ehdr_ptr = (Elf32_Ehdr *)elfptr;
 	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr));
-	size = sizeof(Elf32_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr));
+	size = elfsz;
 	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
 		size += phdr_ptr->p_memsz;
 		phdr_ptr++;
@@ -294,6 +295,8 @@ static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
 	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
 	*elfsz = *elfsz - i;
 	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
+	memset(elfptr + *elfsz, 0, i);
+	*elfsz = roundup(*elfsz, PAGE_SIZE);
 
 	/* Modify e_phnum to reflect merged headers. */
 	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
@@ -375,6 +378,8 @@ static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
 	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
 	*elfsz = *elfsz - i;
 	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
+	memset(elfptr + *elfsz, 0, i);
+	*elfsz = roundup(*elfsz, PAGE_SIZE);
 
 	/* Modify e_phnum to reflect merged headers. */
 	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
@@ -398,8 +403,7 @@ static int __init process_ptload_program_headers_elf64(char *elfptr,
 	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */
 
 	/* First program header is PT_NOTE header. */
-	vmcore_off = sizeof(Elf64_Ehdr) +
-			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr) +
+	vmcore_off = elfsz +
 			phdr_ptr->p_memsz; /* Note sections */
 
 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
@@ -435,8 +439,7 @@ static int __init process_ptload_program_headers_elf32(char *elfptr,
 	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */
 
 	/* First program header is PT_NOTE header. */
-	vmcore_off = sizeof(Elf32_Ehdr) +
-			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr) +
+	vmcore_off = elfsz +
 			phdr_ptr->p_memsz; /* Note sections */
 
 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
@@ -459,18 +462,14 @@ static int __init process_ptload_program_headers_elf32(char *elfptr,
 }
 
 /* Sets offset fields of vmcore elements. */
-static void __init set_vmcore_list_offsets_elf64(char *elfptr,
+static void __init set_vmcore_list_offsets(size_t elfsz,
 						struct list_head *vc_list)
 {
 	loff_t vmcore_off;
-	Elf64_Ehdr *ehdr_ptr;
 	struct vmcore *m;
 
-	ehdr_ptr = (Elf64_Ehdr *)elfptr;
-
 	/* Skip Elf header and program headers. */
-	vmcore_off = sizeof(Elf64_Ehdr) +
-			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr);
+	vmcore_off = elfsz;
 
 	list_for_each_entry(m, vc_list, list) {
 		m->offset = vmcore_off;
@@ -478,24 +477,10 @@ static void __init set_vmcore_list_offsets_elf64(char *elfptr,
 	}
 }
 
-/* Sets offset fields of vmcore elements. */
-static void __init set_vmcore_list_offsets_elf32(char *elfptr,
-						struct list_head *vc_list)
+static void free_elfcorebuf(void)
 {
-	loff_t vmcore_off;
-	Elf32_Ehdr *ehdr_ptr;
-	struct vmcore *m;
-
-	ehdr_ptr = (Elf32_Ehdr *)elfptr;
-
-	/* Skip Elf header and program headers. */
-	vmcore_off = sizeof(Elf32_Ehdr) +
-			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr);
-
-	list_for_each_entry(m, vc_list, list) {
-		m->offset = vmcore_off;
-		vmcore_off += m->size;
-	}
+	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
+	elfcorebuf = NULL;
 }
 
 static int __init parse_crash_elf64_headers(void)
@@ -526,31 +511,31 @@ static int __init parse_crash_elf64_headers(void)
 	}
 
 	/* Read in all elf headers. */
-	elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
-	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
+	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
+				ehdr.e_phnum * sizeof(Elf64_Phdr);
+	elfcorebuf_sz = elfcorebuf_sz_orig;
+	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+					      get_order(elfcorebuf_sz_orig));
 	if (!elfcorebuf)
 		return -ENOMEM;
 	addr = elfcorehdr_addr;
-	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
-	if (rc < 0) {
-		kfree(elfcorebuf);
-		return rc;
-	}
+	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
+	if (rc < 0)
+		goto fail;
 
 	/* Merge all PT_NOTE headers into one. */
 	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
-	if (rc) {
-		kfree(elfcorebuf);
-		return rc;
-	}
+	if (rc)
+		goto fail;
 	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
 							&vmcore_list);
-	if (rc) {
-		kfree(elfcorebuf);
-		return rc;
-	}
-	set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
+	if (rc)
+		goto fail;
+	set_vmcore_list_offsets(elfcorebuf_sz, &vmcore_list);
 	return 0;
+fail:
+	free_elfcorebuf();
+	return rc;
 }
 
 static int __init parse_crash_elf32_headers(void)
@@ -581,31 +566,30 @@ static int __init parse_crash_elf32_headers(void)
 	}
 
 	/* Read in all elf headers. */
-	elfcorebuf_sz = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
-	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
+	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
+	elfcorebuf_sz = elfcorebuf_sz_orig;
+	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+					      get_order(elfcorebuf_sz_orig));
 	if (!elfcorebuf)
 		return -ENOMEM;
 	addr = elfcorehdr_addr;
-	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
-	if (rc < 0) {
-		kfree(elfcorebuf);
-		return rc;
-	}
+	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
+	if (rc < 0)
+		goto fail;
 
 	/* Merge all PT_NOTE headers into one. */
 	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
-	if (rc) {
-		kfree(elfcorebuf);
-		return rc;
-	}
+	if (rc)
+		goto fail;
 	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
 						&vmcore_list);
-	if (rc) {
-		kfree(elfcorebuf);
-		return rc;
-	}
-	set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list);
+	if (rc)
+		goto fail;
+	set_vmcore_list_offsets(elfcorebuf_sz, &vmcore_list);
 	return 0;
+fail:
+	free_elfcorebuf();
+	return rc;
 }
 
 static int __init parse_crash_elf_headers(void)
@@ -629,14 +613,14 @@ static int __init parse_crash_elf_headers(void)
 			return rc;
 
 		/* Determine vmcore size. */
-		vmcore_size = get_vmcore_size_elf64(elfcorebuf);
+		vmcore_size = get_vmcore_size_elf64(elfcorebuf, elfcorebuf_sz);
 	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
 		rc = parse_crash_elf32_headers();
 		if (rc)
 			return rc;
 
 		/* Determine vmcore size. */
-		vmcore_size = get_vmcore_size_elf32(elfcorebuf);
+		vmcore_size = get_vmcore_size_elf32(elfcorebuf, elfcorebuf_sz);
 	} else {
 		pr_warn("Warning: Core image elf header is not sane\n");
 		return -EINVAL;
@@ -683,7 +667,6 @@ void vmcore_cleanup(void)
 		list_del(&m->list);
 		kfree(m);
 	}
-	kfree(elfcorebuf);
-	elfcorebuf = NULL;
+	free_elfcorebuf();
 }
 EXPORT_SYMBOL_GPL(vmcore_cleanup);