Diffstat (limited to 'fs/binfmt_elf.c')
-rw-r--r--	fs/binfmt_elf.c	77
1 file changed, 32 insertions, 45 deletions
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index bad7d8770d72..88d180306cf9 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1478,7 +1478,7 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
 	const struct user_regset_view *view = task_user_regset_view(dump_task);
 	struct elf_thread_core_info *t;
 	struct elf_prpsinfo *psinfo;
-	struct task_struct *g, *p;
+	struct core_thread *ct;
 	unsigned int i;
 
 	info->size = 0;
@@ -1517,34 +1517,26 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
 	/*
 	 * Allocate a structure for each thread.
 	 */
-	rcu_read_lock();
-	do_each_thread(g, p)
-		if (p->mm == dump_task->mm) {
-			if (p->flags & PF_KTHREAD)
-				continue;
-
-			t = kzalloc(offsetof(struct elf_thread_core_info,
-					     notes[info->thread_notes]),
-				    GFP_ATOMIC);
-			if (unlikely(!t)) {
-				rcu_read_unlock();
-				return 0;
-			}
-			t->task = p;
-			if (p == dump_task || !info->thread) {
-				t->next = info->thread;
-				info->thread = t;
-			} else {
-				/*
-				 * Make sure to keep the original task at
-				 * the head of the list.
-				 */
-				t->next = info->thread->next;
-				info->thread->next = t;
-			}
+	for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
+		t = kzalloc(offsetof(struct elf_thread_core_info,
+				     notes[info->thread_notes]),
+			    GFP_KERNEL);
+		if (unlikely(!t))
+			return 0;
+
+		t->task = ct->task;
+		if (ct->task == dump_task || !info->thread) {
+			t->next = info->thread;
+			info->thread = t;
+		} else {
+			/*
+			 * Make sure to keep the original task at
+			 * the head of the list.
+			 */
+			t->next = info->thread->next;
+			info->thread->next = t;
 		}
-	while_each_thread(g, p);
-	rcu_read_unlock();
+	}
 
 	/*
 	 * Now fill in each thread's information.
@@ -1691,7 +1683,6 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
 {
 #define NUM_NOTES	6
 	struct list_head *t;
-	struct task_struct *g, *p;
 
 	info->notes = NULL;
 	info->prstatus = NULL;
@@ -1723,23 +1714,19 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
 
 	info->thread_status_size = 0;
 	if (signr) {
+		struct core_thread *ct;
 		struct elf_thread_status *ets;
-		rcu_read_lock();
-		do_each_thread(g, p)
-			if (current->mm == p->mm && current != p) {
-				if (p->flags & PF_KTHREAD)
-					continue;
-
-				ets = kzalloc(sizeof(*ets), GFP_ATOMIC);
-				if (!ets) {
-					rcu_read_unlock();
-					return 0;
-				}
-				ets->thread = p;
-				list_add(&ets->list, &info->thread_list);
-			}
-		while_each_thread(g, p);
-		rcu_read_unlock();
+
+		for (ct = current->mm->core_state->dumper.next;
+		     ct; ct = ct->next) {
+			ets = kzalloc(sizeof(*ets), GFP_KERNEL);
+			if (!ets)
+				return 0;
+
+			ets->thread = ct->task;
+			list_add(&ets->list, &info->thread_list);
+		}
+
 		list_for_each(t, &info->thread_list) {
 			int sz;
 
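For reference, the head-insertion rule the patch preserves ("keep the original task at the head of the list") can be modelled outside the kernel. The following is a minimal standalone userspace sketch, not kernel code: the type names, the hard-coded thread IDs, and main() are illustrative stand-ins for struct core_thread and elf_thread_core_info, but the walk over a singly linked dumper-style list and the insertion logic mirror the loops added above.

/* Standalone model of the dumper-list walk and note insertion above. */
#include <stdio.h>
#include <stdlib.h>

struct dump_thread {			/* stands in for struct core_thread */
	int tid;
	struct dump_thread *next;
};

struct thread_note {			/* stands in for elf_thread_core_info */
	const struct dump_thread *task;
	struct thread_note *next;
};

int main(void)
{
	struct dump_thread t3 = { 3, NULL };
	struct dump_thread t2 = { 2, &t3 };
	struct dump_thread dumper = { 1, &t2 };	/* dumping task heads the list */
	const struct dump_thread *dump_task = &dumper;
	struct thread_note *head = NULL;

	/* Walk the list the way the patched loop walks core_state->dumper. */
	for (const struct dump_thread *ct = &dumper; ct; ct = ct->next) {
		struct thread_note *n = calloc(1, sizeof(*n));

		if (!n)
			return 1;
		n->task = ct;
		if (ct == dump_task || !head) {
			/* the dump task (or the very first entry) becomes the head */
			n->next = head;
			head = n;
		} else {
			/* every other thread is inserted right behind the head */
			n->next = head->next;
			head->next = n;
		}
	}

	/* The dump task's note always comes out first. */
	for (const struct thread_note *n = head; n; n = n->next)
		printf("note for tid %d\n", n->task->tid);
	return 0;
}

The effect, in the model as in the patch, is that the note for the dumping thread is always emitted first, regardless of where that thread sits in the list being walked.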