Diffstat (limited to 'fs/binfmt_elf.c')
-rw-r--r--  fs/binfmt_elf.c | 677
1 files changed, 513 insertions, 164 deletions
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index f0b3171842f2..18ed6dd906c1 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -45,7 +45,8 @@
 
 static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
 static int load_elf_library(struct file *);
-static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
+static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
+				int, int, unsigned long);
 
 /*
  * If we don't support core dumping, then supply a NULL so we
@@ -298,33 +299,70 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 #ifndef elf_map
 
 static unsigned long elf_map(struct file *filep, unsigned long addr,
-		struct elf_phdr *eppnt, int prot, int type)
+		struct elf_phdr *eppnt, int prot, int type,
+		unsigned long total_size)
 {
 	unsigned long map_addr;
-	unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr);
+	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
+	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
+	addr = ELF_PAGESTART(addr);
+	size = ELF_PAGEALIGN(size);
 
-	down_write(&current->mm->mmap_sem);
 	/* mmap() will return -EINVAL if given a zero size, but a
 	 * segment with zero filesize is perfectly valid */
-	if (eppnt->p_filesz + pageoffset)
-		map_addr = do_mmap(filep, ELF_PAGESTART(addr),
-				   eppnt->p_filesz + pageoffset, prot, type,
-				   eppnt->p_offset - pageoffset);
-	else
-		map_addr = ELF_PAGESTART(addr);
+	if (!size)
+		return addr;
+
+	down_write(&current->mm->mmap_sem);
+	/*
+	 * total_size is the size of the ELF (interpreter) image.
+	 * The _first_ mmap needs to know the full size, otherwise
+	 * randomization might put this image into an overlapping
+	 * position with the ELF binary image. (since size < total_size)
+	 * So we first map the 'big' image - and unmap the remainder at
+	 * the end. (which unmap is needed for ELF images with holes.)
+	 */
+	if (total_size) {
+		total_size = ELF_PAGEALIGN(total_size);
+		map_addr = do_mmap(filep, addr, total_size, prot, type, off);
+		if (!BAD_ADDR(map_addr))
+			do_munmap(current->mm, map_addr+size, total_size-size);
+	} else
+		map_addr = do_mmap(filep, addr, size, prot, type, off);
+
 	up_write(&current->mm->mmap_sem);
 	return(map_addr);
 }
 
 #endif /* !elf_map */
 
+static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
+{
+	int i, first_idx = -1, last_idx = -1;
+
+	for (i = 0; i < nr; i++) {
+		if (cmds[i].p_type == PT_LOAD) {
+			last_idx = i;
+			if (first_idx == -1)
+				first_idx = i;
+		}
+	}
+	if (first_idx == -1)
+		return 0;
+
+	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
+				ELF_PAGESTART(cmds[first_idx].p_vaddr);
+}
+
+
 /* This is much more generalized than the library routine read function,
    so we keep this separate.  Technically the library read function
    is only provided so that we can read a.out libraries that have
    an ELF header */
 
 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
-		struct file *interpreter, unsigned long *interp_load_addr)
+		struct file *interpreter, unsigned long *interp_map_addr,
+		unsigned long no_base)
 {
 	struct elf_phdr *elf_phdata;
 	struct elf_phdr *eppnt;
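The hunk above changes the mapping strategy: elf_map() now receives the total span of the image's PT_LOAD segments, maps that whole range with one do_mmap() call, and trims the tail with do_munmap(), so a randomized interpreter base can no longer overlap the main binary. As a rough illustration of the arithmetic total_mapping_size() performs, here is a stand-alone user-space sketch with made-up segment values (not part of the patch):

/*
 * Illustration only: total_mapping_size() returns "end of the last
 * PT_LOAD" minus "page-rounded start of the first PT_LOAD", i.e. how
 * much address space the first mmap must reserve.  Numbers are made up.
 */
#include <stdio.h>

#define PAGE_SIZE		0x1000UL
#define ELF_PAGESTART(_v)	((_v) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long first_vaddr = 0x0UL;	/* first PT_LOAD (text) */
	unsigned long last_vaddr = 0x16d70UL;	/* last PT_LOAD (data) */
	unsigned long last_memsz = 0x1560UL;	/* includes bss */
	unsigned long total = last_vaddr + last_memsz - ELF_PAGESTART(first_vaddr);

	printf("reserve 0x%lx bytes up front\n", total);
	return 0;
}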
@@ -332,6 +370,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 	int load_addr_set = 0;
 	unsigned long last_bss = 0, elf_bss = 0;
 	unsigned long error = ~0UL;
+	unsigned long total_size;
 	int retval, i, size;
 
 	/* First of all, some simple consistency checks */
@@ -370,6 +409,12 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 		goto out_close;
 	}
 
+	total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
+	if (!total_size) {
+		error = -EINVAL;
+		goto out_close;
+	}
+
 	eppnt = elf_phdata;
 	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
 		if (eppnt->p_type == PT_LOAD) {
@@ -387,9 +432,14 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 			vaddr = eppnt->p_vaddr;
 			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
 				elf_type |= MAP_FIXED;
+			else if (no_base && interp_elf_ex->e_type == ET_DYN)
+				load_addr = -vaddr;
 
 			map_addr = elf_map(interpreter, load_addr + vaddr,
-					   eppnt, elf_prot, elf_type);
+					   eppnt, elf_prot, elf_type, total_size);
+			total_size = 0;
+			if (!*interp_map_addr)
+				*interp_map_addr = map_addr;
 			error = map_addr;
 			if (BAD_ADDR(map_addr))
 				goto out_close;
@@ -455,8 +505,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 		goto out_close;
 	}
 
-	*interp_load_addr = load_addr;
-	error = ((unsigned long)interp_elf_ex->e_entry) + load_addr;
+	error = load_addr;
 
 out_close:
 	kfree(elf_phdata);
@@ -546,14 +595,14 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	int load_addr_set = 0;
 	char * elf_interpreter = NULL;
 	unsigned int interpreter_type = INTERPRETER_NONE;
-	unsigned char ibcs2_interpreter = 0;
 	unsigned long error;
 	struct elf_phdr *elf_ppnt, *elf_phdata;
 	unsigned long elf_bss, elf_brk;
 	int elf_exec_fileno;
 	int retval, i;
 	unsigned int size;
-	unsigned long elf_entry, interp_load_addr = 0;
+	unsigned long elf_entry;
+	unsigned long interp_load_addr = 0;
 	unsigned long start_code, end_code, start_data, end_data;
 	unsigned long reloc_func_desc = 0;
 	char passed_fileno[6];
@@ -663,14 +712,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 		if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
 			goto out_free_interp;
 
-		/* If the program interpreter is one of these two,
-		 * then assume an iBCS2 image. Otherwise assume
-		 * a native linux image.
-		 */
-		if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
-		    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
-			ibcs2_interpreter = 1;
-
 		/*
 		 * The early SET_PERSONALITY here is so that the lookup
 		 * for the interpreter happens in the namespace of the
@@ -690,7 +731,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 		 * switch really is going to happen - do this in
 		 * flush_thread().	- akpm
 		 */
-		SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
+		SET_PERSONALITY(loc->elf_ex, 0);
 
 		interpreter = open_exec(elf_interpreter);
 		retval = PTR_ERR(interpreter);
@@ -769,7 +810,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 			goto out_free_dentry;
 	} else {
 		/* Executables without an interpreter also need a personality */
-		SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
+		SET_PERSONALITY(loc->elf_ex, 0);
 	}
 
 	/* OK, we are done with that, now set up the arg stuff,
@@ -803,7 +844,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 
 	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
 	   may depend on the personality.  */
-	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
+	SET_PERSONALITY(loc->elf_ex, 0);
 	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
 		current->personality |= READ_IMPLIES_EXEC;
 
@@ -825,9 +866,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	current->mm->start_stack = bprm->p;
 
 	/* Now we do a little grungy work by mmaping the ELF image into
-	   the correct location in memory.  At this point, we assume that
-	   the image should be loaded at fixed address, not at a variable
-	   address. */
+	   the correct location in memory. */
 	for(i = 0, elf_ppnt = elf_phdata;
 	    i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
 		int elf_prot = 0, elf_flags;
@@ -881,11 +920,15 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 			 * default mmap base, as well as whatever program they
 			 * might try to exec.  This is because the brk will
 			 * follow the loader, and is not movable.  */
+#ifdef CONFIG_X86
+			load_bias = 0;
+#else
 			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+#endif
 		}
 
 		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-				elf_prot, elf_flags);
+				elf_prot, elf_flags, 0);
 		if (BAD_ADDR(error)) {
 			send_sig(SIGKILL, current, 0);
 			retval = IS_ERR((void *)error) ?
@@ -961,13 +1004,25 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	}
 
 	if (elf_interpreter) {
-		if (interpreter_type == INTERPRETER_AOUT)
+		if (interpreter_type == INTERPRETER_AOUT) {
 			elf_entry = load_aout_interp(&loc->interp_ex,
 						     interpreter);
-		else
+		} else {
+			unsigned long uninitialized_var(interp_map_addr);
+
 			elf_entry = load_elf_interp(&loc->interp_elf_ex,
 						    interpreter,
-						    &interp_load_addr);
+						    &interp_map_addr,
+						    load_bias);
+			if (!IS_ERR((void *)elf_entry)) {
+				/*
+				 * load_elf_interp() returns relocation
+				 * adjustment
+				 */
+				interp_load_addr = elf_entry;
+				elf_entry += loc->interp_elf_ex.e_entry;
+			}
+		}
 		if (BAD_ADDR(elf_entry)) {
 			force_sig(SIGSEGV, current);
 			retval = IS_ERR((void *)elf_entry) ?
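With the change above, load_elf_interp() returns the interpreter's relocation (its load address) and the caller adds e_entry itself; interp_load_addr is the value that later goes into AT_BASE. A tiny stand-alone illustration of that arithmetic, with invented addresses that are not taken from the patch:

/*
 * Illustration only: turning the relocation returned by
 * load_elf_interp() into the interpreter's entry point.
 */
#include <stdio.h>

int main(void)
{
	unsigned long interp_load_addr = 0xb7f00000UL;	/* returned relocation */
	unsigned long interp_e_entry = 0x800UL;		/* interpreter's e_entry */

	printf("elf_entry = 0x%lx\n", interp_load_addr + interp_e_entry);
	return 0;
}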
@@ -1021,6 +1076,12 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	current->mm->end_data = end_data;
 	current->mm->start_stack = bprm->p;
 
+#ifdef arch_randomize_brk
+	if (current->flags & PF_RANDOMIZE)
+		current->mm->brk = current->mm->start_brk =
+			arch_randomize_brk(current->mm);
+#endif
+
 	if (current->personality & MMAP_PAGE_ZERO) {
 		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
 		   and some applications "depend" upon this behavior.
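The hunk above wires up heap-start randomization: when the task has PF_RANDOMIZE set and the architecture provides arch_randomize_brk(), the brk is displaced before the program starts, so the heap no longer sits at a predictable offset from the binary. The helper itself lives in arch code and is not part of this file; a minimal sketch of what such a hook could look like, assuming the kernel's randomize_range() helper and an illustrative 32 MB window:

/*
 * Sketch only: an arch_randomize_brk() in the spirit of the x86
 * implementation; the 0x02000000 window is illustrative.
 */
#include <linux/mm_types.h>
#include <linux/random.h>

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;

	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}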
@@ -1325,7 +1386,8 @@ static int writenote(struct memelfnote *men, struct file *file,
 	if (!dump_seek(file, (off))) \
 		goto end_coredump;
 
-static void fill_elf_header(struct elfhdr *elf, int segs)
+static void fill_elf_header(struct elfhdr *elf, int segs,
+			    u16 machine, u32 flags, u8 osabi)
 {
 	memcpy(elf->e_ident, ELFMAG, SELFMAG);
 	elf->e_ident[EI_CLASS] = ELF_CLASS;
@@ -1335,12 +1397,12 @@ static void fill_elf_header(struct elfhdr *elf, int segs)
 	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
 
 	elf->e_type = ET_CORE;
-	elf->e_machine = ELF_ARCH;
+	elf->e_machine = machine;
 	elf->e_version = EV_CURRENT;
 	elf->e_entry = 0;
 	elf->e_phoff = sizeof(struct elfhdr);
 	elf->e_shoff = 0;
-	elf->e_flags = ELF_CORE_EFLAGS;
+	elf->e_flags = flags;
 	elf->e_ehsize = sizeof(struct elfhdr);
 	elf->e_phentsize = sizeof(struct elf_phdr);
 	elf->e_phnum = segs;
@@ -1447,6 +1509,238 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
 	return 0;
 }
 
+static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
+{
+	elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
+	int i = 0;
+	do
+		i += 2;
+	while (auxv[i - 2] != AT_NULL);
+	fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
+}
+
+#ifdef CORE_DUMP_USE_REGSET
+#include <linux/regset.h>
+
+struct elf_thread_core_info {
+	struct elf_thread_core_info *next;
+	struct task_struct *task;
+	struct elf_prstatus prstatus;
+	struct memelfnote notes[0];
+};
+
+struct elf_note_info {
+	struct elf_thread_core_info *thread;
+	struct memelfnote psinfo;
+	struct memelfnote auxv;
+	size_t size;
+	int thread_notes;
+};
+
+static int fill_thread_core_info(struct elf_thread_core_info *t,
+				 const struct user_regset_view *view,
+				 long signr, size_t *total)
+{
+	unsigned int i;
+
+	/*
+	 * NT_PRSTATUS is the one special case, because the regset data
+	 * goes into the pr_reg field inside the note contents, rather
+	 * than being the whole note contents.  We fill the reset in here.
+	 * We assume that regset 0 is NT_PRSTATUS.
+	 */
+	fill_prstatus(&t->prstatus, t->task, signr);
+	(void) view->regsets[0].get(t->task, &view->regsets[0],
+				    0, sizeof(t->prstatus.pr_reg),
+				    &t->prstatus.pr_reg, NULL);
+
+	fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
+		  sizeof(t->prstatus), &t->prstatus);
+	*total += notesize(&t->notes[0]);
+
+	/*
+	 * Each other regset might generate a note too.  For each regset
+	 * that has no core_note_type or is inactive, we leave t->notes[i]
+	 * all zero and we'll know to skip writing it later.
+	 */
+	for (i = 1; i < view->n; ++i) {
+		const struct user_regset *regset = &view->regsets[i];
+		if (regset->core_note_type &&
+		    (!regset->active || regset->active(t->task, regset))) {
+			int ret;
+			size_t size = regset->n * regset->size;
+			void *data = kmalloc(size, GFP_KERNEL);
+			if (unlikely(!data))
+				return 0;
+			ret = regset->get(t->task, regset,
+					  0, size, data, NULL);
+			if (unlikely(ret))
+				kfree(data);
+			else {
+				if (regset->core_note_type != NT_PRFPREG)
+					fill_note(&t->notes[i], "LINUX",
+						  regset->core_note_type,
+						  size, data);
+				else {
+					t->prstatus.pr_fpvalid = 1;
+					fill_note(&t->notes[i], "CORE",
+						  NT_PRFPREG, size, data);
+				}
+				*total += notesize(&t->notes[i]);
+			}
+		}
+	}
+
+	return 1;
+}
+
+static int fill_note_info(struct elfhdr *elf, int phdrs,
+			  struct elf_note_info *info,
+			  long signr, struct pt_regs *regs)
+{
+	struct task_struct *dump_task = current;
+	const struct user_regset_view *view = task_user_regset_view(dump_task);
+	struct elf_thread_core_info *t;
+	struct elf_prpsinfo *psinfo;
+	struct task_struct *g, *p;
+	unsigned int i;
+
+	info->size = 0;
+	info->thread = NULL;
+
+	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
+	fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
+
+	if (psinfo == NULL)
+		return 0;
+
+	/*
+	 * Figure out how many notes we're going to need for each thread.
+	 */
+	info->thread_notes = 0;
+	for (i = 0; i < view->n; ++i)
+		if (view->regsets[i].core_note_type != 0)
+			++info->thread_notes;
+
+	/*
+	 * Sanity check.  We rely on regset 0 being in NT_PRSTATUS,
+	 * since it is our one special case.
+	 */
+	if (unlikely(info->thread_notes == 0) ||
+	    unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
+		WARN_ON(1);
+		return 0;
+	}
+
+	/*
+	 * Initialize the ELF file header.
+	 */
+	fill_elf_header(elf, phdrs,
+			view->e_machine, view->e_flags, view->ei_osabi);
+
+	/*
+	 * Allocate a structure for each thread.
+	 */
+	rcu_read_lock();
+	do_each_thread(g, p)
+		if (p->mm == dump_task->mm) {
+			t = kzalloc(offsetof(struct elf_thread_core_info,
+					     notes[info->thread_notes]),
+				    GFP_ATOMIC);
+			if (unlikely(!t)) {
+				rcu_read_unlock();
+				return 0;
+			}
+			t->task = p;
+			if (p == dump_task || !info->thread) {
+				t->next = info->thread;
+				info->thread = t;
+			} else {
+				/*
+				 * Make sure to keep the original task at
+				 * the head of the list.
+				 */
+				t->next = info->thread->next;
+				info->thread->next = t;
+			}
+		}
+	while_each_thread(g, p);
+	rcu_read_unlock();
+
+	/*
+	 * Now fill in each thread's information.
+	 */
+	for (t = info->thread; t != NULL; t = t->next)
+		if (!fill_thread_core_info(t, view, signr, &info->size))
+			return 0;
+
+	/*
+	 * Fill in the two process-wide notes.
+	 */
+	fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
+	info->size += notesize(&info->psinfo);
+
+	fill_auxv_note(&info->auxv, current->mm);
+	info->size += notesize(&info->auxv);
+
+	return 1;
+}
+
+static size_t get_note_info_size(struct elf_note_info *info)
+{
+	return info->size;
+}
+
+/*
+ * Write all the notes for each thread.  When writing the first thread, the
+ * process-wide notes are interleaved after the first thread-specific note.
+ */
+static int write_note_info(struct elf_note_info *info,
+			   struct file *file, loff_t *foffset)
+{
+	bool first = 1;
+	struct elf_thread_core_info *t = info->thread;
+
+	do {
+		int i;
+
+		if (!writenote(&t->notes[0], file, foffset))
+			return 0;
+
+		if (first && !writenote(&info->psinfo, file, foffset))
+			return 0;
+		if (first && !writenote(&info->auxv, file, foffset))
+			return 0;
+
+		for (i = 1; i < info->thread_notes; ++i)
+			if (t->notes[i].data &&
+			    !writenote(&t->notes[i], file, foffset))
+				return 0;
+
+		first = 0;
+		t = t->next;
+	} while (t);
+
+	return 1;
+}
+
+static void free_note_info(struct elf_note_info *info)
+{
+	struct elf_thread_core_info *threads = info->thread;
+	while (threads) {
+		unsigned int i;
+		struct elf_thread_core_info *t = threads;
+		threads = t->next;
+		WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
+		for (i = 1; i < info->thread_notes; ++i)
+			kfree(t->notes[i].data);
+		kfree(t);
+	}
+	kfree(info->psinfo.data);
+}
+
+#else
+
 /* Here is the structure in which status of each thread is captured. */
 struct elf_thread_status
 {
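The regset-based dumper added above is written against the struct user_regset / struct user_regset_view interface from <linux/regset.h>. Abridged from memory and trimmed to just the members this code touches (the real header has more fields, e.g. set, writeback, align, bias), the relevant pieces look roughly like:

/*
 * Abridged sketch of <linux/regset.h>: only what fill_thread_core_info()
 * and fill_note_info() rely on; consult the real header for the
 * authoritative definition.
 */
struct user_regset {
	user_regset_get_fn		*get;		/* copy registers out of the task */
	user_regset_active_fn		*active;	/* NULL, or "worth dumping now?" */
	unsigned int			n;		/* number of register slots */
	unsigned int			size;		/* bytes per slot; note size = n * size */
	unsigned int			core_note_type;	/* NT_* value; 0 = never dumped */
};

struct user_regset_view {
	const char			*name;
	const struct user_regset	*regsets;	/* regsets[0] must be NT_PRSTATUS */
	unsigned int			n;		/* number of regsets */
	u32				e_flags;	/* copied into the core ELF header */
	u16				e_machine;
	u8				ei_osabi;
};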
@@ -1499,6 +1793,176 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
 	return sz;
 }
 
+struct elf_note_info {
+	struct memelfnote *notes;
+	struct elf_prstatus *prstatus;	/* NT_PRSTATUS */
+	struct elf_prpsinfo *psinfo;	/* NT_PRPSINFO */
+	struct list_head thread_list;
+	elf_fpregset_t *fpu;
+#ifdef ELF_CORE_COPY_XFPREGS
+	elf_fpxregset_t *xfpu;
+#endif
+	int thread_status_size;
+	int numnote;
+};
+
+static int fill_note_info(struct elfhdr *elf, int phdrs,
+			  struct elf_note_info *info,
+			  long signr, struct pt_regs *regs)
+{
+#define	NUM_NOTES	6
+	struct list_head *t;
+	struct task_struct *g, *p;
+
+	info->notes = NULL;
+	info->prstatus = NULL;
+	info->psinfo = NULL;
+	info->fpu = NULL;
+#ifdef ELF_CORE_COPY_XFPREGS
+	info->xfpu = NULL;
+#endif
+	INIT_LIST_HEAD(&info->thread_list);
+
+	info->notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote),
+			      GFP_KERNEL);
+	if (!info->notes)
+		return 0;
+	info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
+	if (!info->psinfo)
+		return 0;
+	info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
+	if (!info->prstatus)
+		return 0;
+	info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
+	if (!info->fpu)
+		return 0;
+#ifdef ELF_CORE_COPY_XFPREGS
+	info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
+	if (!info->xfpu)
+		return 0;
+#endif
+
+	info->thread_status_size = 0;
+	if (signr) {
+		struct elf_thread_status *tmp;
+		rcu_read_lock();
+		do_each_thread(g, p)
+			if (current->mm == p->mm && current != p) {
+				tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
+				if (!tmp) {
+					rcu_read_unlock();
+					return 0;
+				}
+				tmp->thread = p;
+				list_add(&tmp->list, &info->thread_list);
+			}
+		while_each_thread(g, p);
+		rcu_read_unlock();
+		list_for_each(t, &info->thread_list) {
+			struct elf_thread_status *tmp;
+			int sz;
+
+			tmp = list_entry(t, struct elf_thread_status, list);
+			sz = elf_dump_thread_status(signr, tmp);
+			info->thread_status_size += sz;
+		}
+	}
+	/* now collect the dump for the current */
+	memset(info->prstatus, 0, sizeof(*info->prstatus));
+	fill_prstatus(info->prstatus, current, signr);
+	elf_core_copy_regs(&info->prstatus->pr_reg, regs);
+
+	/* Set up header */
+	fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS, ELF_OSABI);
+
+	/*
+	 * Set up the notes in similar form to SVR4 core dumps made
+	 * with info from their /proc.
+	 */
+
+	fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
+		  sizeof(*info->prstatus), info->prstatus);
+	fill_psinfo(info->psinfo, current->group_leader, current->mm);
+	fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
+		  sizeof(*info->psinfo), info->psinfo);
+
+	info->numnote = 2;
+
+	fill_auxv_note(&info->notes[info->numnote++], current->mm);
+
+	/* Try to dump the FPU. */
+	info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
+							       info->fpu);
+	if (info->prstatus->pr_fpvalid)
+		fill_note(info->notes + info->numnote++,
+			  "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
+#ifdef ELF_CORE_COPY_XFPREGS
+	if (elf_core_copy_task_xfpregs(current, info->xfpu))
+		fill_note(info->notes + info->numnote++,
+			  "LINUX", ELF_CORE_XFPREG_TYPE,
+			  sizeof(*info->xfpu), info->xfpu);
+#endif
+
+	return 1;
+
+#undef NUM_NOTES
+}
+
+static size_t get_note_info_size(struct elf_note_info *info)
+{
+	int sz = 0;
+	int i;
+
+	for (i = 0; i < info->numnote; i++)
+		sz += notesize(info->notes + i);
+
+	sz += info->thread_status_size;
+
+	return sz;
+}
+
+static int write_note_info(struct elf_note_info *info,
+			   struct file *file, loff_t *foffset)
+{
+	int i;
+	struct list_head *t;
+
+	for (i = 0; i < info->numnote; i++)
+		if (!writenote(info->notes + i, file, foffset))
+			return 0;
+
+	/* write out the thread status notes section */
+	list_for_each(t, &info->thread_list) {
+		struct elf_thread_status *tmp =
+				list_entry(t, struct elf_thread_status, list);
+
+		for (i = 0; i < tmp->num_notes; i++)
+			if (!writenote(&tmp->notes[i], file, foffset))
+				return 0;
+	}
+
+	return 1;
+}
+
+static void free_note_info(struct elf_note_info *info)
+{
+	while (!list_empty(&info->thread_list)) {
+		struct list_head *tmp = info->thread_list.next;
+		list_del(tmp);
+		kfree(list_entry(tmp, struct elf_thread_status, list));
+	}
+
+	kfree(info->prstatus);
+	kfree(info->psinfo);
+	kfree(info->notes);
+	kfree(info->fpu);
+#ifdef ELF_CORE_COPY_XFPREGS
+	kfree(info->xfpu);
+#endif
+}
+
+#endif
+
 static struct vm_area_struct *first_vma(struct task_struct *tsk,
 					struct vm_area_struct *gate_vma)
 {
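Both the regset variant and the legacy fallback above expose the same four entry points, which is what lets the elf_core_dump() rewrite in the hunks below stay implementation-agnostic. A condensed sketch of the calling sequence (simplified, not a literal excerpt of the patch):

/*
 * Condensed sketch of how elf_core_dump() drives either fill_note_info()
 * implementation; error paths and the real arguments are in the hunks below.
 */
static int dump_notes_sketch(struct elfhdr *elf, int segs, long signr,
			     struct pt_regs *regs, struct file *file,
			     loff_t *foffset)
{
	struct elf_note_info info;
	int ok;

	if (!fill_note_info(elf, segs + 1, &info, signr, regs))
		return 0;				/* notes gathered, ELF header filled */

	(void) get_note_info_size(&info);		/* sizes the PT_NOTE program header */

	ok = write_note_info(&info, file, foffset);	/* emits every collected note */

	free_note_info(&info);				/* one teardown path for both variants */
	return ok;
}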
@@ -1534,29 +1998,15 @@ static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
  */
 static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
 {
-#define	NUM_NOTES	6
 	int has_dumped = 0;
 	mm_segment_t fs;
 	int segs;
 	size_t size = 0;
-	int i;
 	struct vm_area_struct *vma, *gate_vma;
 	struct elfhdr *elf = NULL;
 	loff_t offset = 0, dataoff, foffset;
-	int numnote;
-	struct memelfnote *notes = NULL;
-	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
-	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
-	struct task_struct *g, *p;
-	LIST_HEAD(thread_list);
-	struct list_head *t;
-	elf_fpregset_t *fpu = NULL;
-#ifdef ELF_CORE_COPY_XFPREGS
-	elf_fpxregset_t *xfpu = NULL;
-#endif
-	int thread_status_size = 0;
-	elf_addr_t *auxv;
 	unsigned long mm_flags;
+	struct elf_note_info info;
 
 	/*
 	 * We no longer stop all VM operations.
@@ -1574,52 +2024,6 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
 	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
 	if (!elf)
 		goto cleanup;
-	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
-	if (!prstatus)
-		goto cleanup;
-	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
-	if (!psinfo)
-		goto cleanup;
-	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
-	if (!notes)
-		goto cleanup;
-	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
-	if (!fpu)
-		goto cleanup;
-#ifdef ELF_CORE_COPY_XFPREGS
-	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
-	if (!xfpu)
-		goto cleanup;
-#endif
-
-	if (signr) {
-		struct elf_thread_status *tmp;
-		rcu_read_lock();
-		do_each_thread(g,p)
-			if (current->mm == p->mm && current != p) {
-				tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
-				if (!tmp) {
-					rcu_read_unlock();
-					goto cleanup;
-				}
-				tmp->thread = p;
-				list_add(&tmp->list, &thread_list);
-			}
-		while_each_thread(g,p);
-		rcu_read_unlock();
-		list_for_each(t, &thread_list) {
-			struct elf_thread_status *tmp;
-			int sz;
-
-			tmp = list_entry(t, struct elf_thread_status, list);
-			sz = elf_dump_thread_status(signr, tmp);
-			thread_status_size += sz;
-		}
-	}
-	/* now collect the dump for the current */
-	memset(prstatus, 0, sizeof(*prstatus));
-	fill_prstatus(prstatus, current, signr);
-	elf_core_copy_regs(&prstatus->pr_reg, regs);
 
 	segs = current->mm->map_count;
 #ifdef ELF_CORE_EXTRA_PHDRS
@@ -1630,42 +2034,16 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
 	if (gate_vma != NULL)
 		segs++;
 
-	/* Set up header */
-	fill_elf_header(elf, segs + 1);	/* including notes section */
-
-	has_dumped = 1;
-	current->flags |= PF_DUMPCORE;
-
 	/*
-	 * Set up the notes in similar form to SVR4 core dumps made
-	 * with info from their /proc.
+	 * Collect all the non-memory information about the process for the
+	 * notes.  This also sets up the file header.
 	 */
+	if (!fill_note_info(elf, segs + 1, /* including notes section */
+			    &info, signr, regs))
+		goto cleanup;
 
-	fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
-	fill_psinfo(psinfo, current->group_leader, current->mm);
-	fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
-
-	numnote = 2;
-
-	auxv = (elf_addr_t *)current->mm->saved_auxv;
-
-	i = 0;
-	do
-		i += 2;
-	while (auxv[i - 2] != AT_NULL);
-	fill_note(&notes[numnote++], "CORE", NT_AUXV,
-		  i * sizeof(elf_addr_t), auxv);
-
-	/* Try to dump the FPU. */
-	if ((prstatus->pr_fpvalid =
-			elf_core_copy_task_fpregs(current, regs, fpu)))
-		fill_note(notes + numnote++,
-			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
-#ifdef ELF_CORE_COPY_XFPREGS
-	if (elf_core_copy_task_xfpregs(current, xfpu))
-		fill_note(notes + numnote++,
-			  "LINUX", ELF_CORE_XFPREG_TYPE, sizeof(*xfpu), xfpu);
-#endif
+	has_dumped = 1;
+	current->flags |= PF_DUMPCORE;
 
 	fs = get_fs();
 	set_fs(KERNEL_DS);
@@ -1678,12 +2056,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
 	/* Write notes phdr entry */
 	{
 		struct elf_phdr phdr;
-		int sz = 0;
-
-		for (i = 0; i < numnote; i++)
-			sz += notesize(notes + i);
-
-		sz += thread_status_size;
+		size_t sz = get_note_info_size(&info);
 
 		sz += elf_coredump_extra_notes_size();
 
@@ -1728,23 +2101,12 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
 #endif
 
 	/* write out the notes section */
-	for (i = 0; i < numnote; i++)
-		if (!writenote(notes + i, file, &foffset))
-			goto end_coredump;
+	if (!write_note_info(&info, file, &foffset))
+		goto end_coredump;
 
 	if (elf_coredump_extra_notes_write(file, &foffset))
 		goto end_coredump;
 
-	/* write out the thread status notes section */
-	list_for_each(t, &thread_list) {
-		struct elf_thread_status *tmp =
-				list_entry(t, struct elf_thread_status, list);
-
-		for (i = 0; i < tmp->num_notes; i++)
-			if (!writenote(&tmp->notes[i], file, &foffset))
-				goto end_coredump;
-	}
-
 	/* Align to page */
 	DUMP_SEEK(dataoff - foffset);
 
@@ -1795,22 +2157,9 @@ end_coredump:
 	set_fs(fs);
 
 cleanup:
-	while (!list_empty(&thread_list)) {
-		struct list_head *tmp = thread_list.next;
-		list_del(tmp);
-		kfree(list_entry(tmp, struct elf_thread_status, list));
-	}
-
 	kfree(elf);
-	kfree(prstatus);
-	kfree(psinfo);
-	kfree(notes);
-	kfree(fpu);
-#ifdef ELF_CORE_COPY_XFPREGS
-	kfree(xfpu);
-#endif
+	free_note_info(&info);
 	return has_dumped;
-#undef NUM_NOTES
 }
 
 #endif		/* USE_ELF_CORE_DUMP */