author     Paul Mundt <lethal@linux-sh.org>  2009-10-11 19:42:46 -0400
committer  Paul Mundt <lethal@linux-sh.org>  2009-10-11 19:42:46 -0400
commit     5ab78ff693d09a6ffc7ca80ad600b2f5feb89d7f (patch)
tree       5be171179a73ffe9f3fe7a5aa825acd74ffe47e8 /arch/sh
parent     74db2479c1fecefd0a190f282f28f00565309807 (diff)
parent     c2d474d6f8b48b6698343cfc1a3630c4647aa7b2 (diff)
Merge branch 'sh/dwarf-unwinder' of git://github.com/mfleming/linux-2.6 into sh/dwarf-unwinder
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/include/asm/dwarf.h |  16
-rw-r--r--  arch/sh/kernel/dwarf.c      | 179
-rw-r--r--  arch/sh/kernel/module.c     |  32
3 files changed, 180 insertions, 47 deletions
diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h
index c367ed3373c5..eef87539963d 100644
--- a/arch/sh/include/asm/dwarf.h
+++ b/arch/sh/include/asm/dwarf.h
@@ -241,6 +241,12 @@ struct dwarf_cie {
 
         unsigned long flags;
 #define DWARF_CIE_Z_AUGMENTATION        (1 << 0)
+
+        /*
+         * 'mod' will be non-NULL if this CIE came from a module's
+         * .eh_frame section.
+         */
+        struct module *mod;
 };
 
 /**
@@ -255,6 +261,12 @@ struct dwarf_fde {
         unsigned char *instructions;
         unsigned char *end;
         struct list_head link;
+
+        /*
+         * 'mod' will be non-NULL if this FDE came from a module's
+         * .eh_frame section.
+         */
+        struct module *mod;
 };
 
 /**
@@ -364,6 +376,10 @@ static inline unsigned int DW_CFA_operand(unsigned long insn)
 
 extern struct dwarf_frame *dwarf_unwind_stack(unsigned long,
                                               struct dwarf_frame *);
+extern void dwarf_free_frame(struct dwarf_frame *);
+extern int dwarf_parse_section(char *, char *, struct module *);
+extern void dwarf_module_unload(struct module *);
+
 #endif /* !__ASSEMBLY__ */
 
 #define CFI_STARTPROC   .cfi_startproc
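The three externs added above are the whole of the new interface: dwarf_parse_section() builds CIE/FDE lists from any .eh_frame range, dwarf_module_unload() tears down the entries owned by a module, and dwarf_free_frame() releases a frame produced by dwarf_unwind_stack(). A condensed sketch of how the first two are meant to be called; the example_* wrappers are illustrative names only, the real call sites are in the dwarf.c and module.c hunks below.

#include <linux/module.h>
#include <asm/dwarf.h>

/* Illustrative wrappers; the real callers are dwarf_unwinder_init(),
 * module_finalize() and module_arch_cleanup() in the hunks below. */
static int example_register_eh_frame(char *start, char *end, struct module *mod)
{
        /* The kernel proper passes NULL for 'mod'; a module passes its own
         * 'struct module *' so its CIEs/FDEs can be identified at unload. */
        return dwarf_parse_section(start, end, mod);
}

static void example_unregister_eh_frame(struct module *mod)
{
        /* Drops every CIE/FDE whose 'mod' member matches @mod. */
        dwarf_module_unload(mod);
}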
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 577302f31e6a..f242cd120cf1 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -530,7 +530,18 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 }
 
 /**
- * dwarf_unwind_stack - recursively unwind the stack
+ * dwarf_free_frame - free the memory allocated for @frame
+ * @frame: the frame to free
+ */
+void dwarf_free_frame(struct dwarf_frame *frame)
+{
+        dwarf_frame_free_regs(frame);
+        mempool_free(frame, dwarf_frame_pool);
+}
+
+/**
+ * dwarf_unwind_stack - unwind the stack
+ *
  * @pc: address of the function to unwind
  * @prev: struct dwarf_frame of the previous stackframe on the callstack
  *
@@ -548,9 +559,9 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
         unsigned long addr;
 
         /*
-         * If this is the first invocation of this recursive function we
-         * need get the contents of a physical register to get the CFA
-         * in order to begin the virtual unwinding of the stack.
+         * If we're starting at the top of the stack we need get the
+         * contents of a physical register to get the CFA in order to
+         * begin the virtual unwinding of the stack.
          *
          * NOTE: the return address is guaranteed to be setup by the
         * time this function makes its first function call.
@@ -572,9 +583,8 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
         fde = dwarf_lookup_fde(pc);
         if (!fde) {
                 /*
-                 * This is our normal exit path - the one that stops the
-                 * recursion. There's two reasons why we might exit
-                 * here,
+                 * This is our normal exit path. There are two reasons
+                 * why we might exit here,
                  *
                  * a) pc has no asscociated DWARF frame info and so
                  *    we don't know how to unwind this frame. This is
@@ -616,10 +626,10 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 
         } else {
                 /*
-                 * Again, this is the first invocation of this
-                 * recurisve function. We need to physically
-                 * read the contents of a register in order to
-                 * get the Canonical Frame Address for this
+                 * Again, we're starting from the top of the
+                 * stack. We need to physically read
+                 * the contents of a register in order to get
+                 * the Canonical Frame Address for this
                  * function.
                  */
                 frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
@@ -649,13 +659,12 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
         return frame;
 
 bail:
-        dwarf_frame_free_regs(frame);
-        mempool_free(frame, dwarf_frame_pool);
+        dwarf_free_frame(frame);
         return NULL;
 }
 
 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
-                           unsigned char *end)
+                           unsigned char *end, struct module *mod)
 {
         struct dwarf_cie *cie;
         unsigned long flags;
@@ -751,6 +760,8 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
         cie->initial_instructions = p;
         cie->instructions_end = end;
 
+        cie->mod = mod;
+
         /* Add to list */
         spin_lock_irqsave(&dwarf_cie_lock, flags);
         list_add_tail(&cie->link, &dwarf_cie_list);
@@ -761,7 +772,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 
 static int dwarf_parse_fde(void *entry, u32 entry_type,
                            void *start, unsigned long len,
-                           unsigned char *end)
+                           unsigned char *end, struct module *mod)
 {
         struct dwarf_fde *fde;
         struct dwarf_cie *cie;
@@ -810,6 +821,8 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
         fde->instructions = p;
         fde->end = end;
 
+        fde->mod = mod;
+
         /* Add to list. */
         spin_lock_irqsave(&dwarf_fde_lock, flags);
         list_add_tail(&fde->link, &dwarf_fde_list);
@@ -833,10 +846,8 @@ static void dwarf_unwinder_dump(struct task_struct *task,
         while (1) {
                 frame = dwarf_unwind_stack(return_addr, _frame);
 
-                if (_frame) {
-                        dwarf_frame_free_regs(_frame);
-                        mempool_free(_frame, dwarf_frame_pool);
-                }
+                if (_frame)
+                        dwarf_free_frame(_frame);
 
                 _frame = frame;
 
@@ -846,6 +857,9 @@ static void dwarf_unwinder_dump(struct task_struct *task,
                 return_addr = frame->return_addr;
                 ops->address(data, return_addr, 1);
         }
+
+        if (frame)
+                dwarf_free_frame(frame);
 }
 
 static struct unwinder dwarf_unwinder = {
@@ -875,15 +889,15 @@ static void dwarf_unwinder_cleanup(void)
 }
 
 /**
- * dwarf_unwinder_init - initialise the dwarf unwinder
+ * dwarf_parse_section - parse DWARF section
+ * @eh_frame_start: start address of the .eh_frame section
+ * @eh_frame_end: end address of the .eh_frame section
+ * @mod: the kernel module containing the .eh_frame section
  *
- * Build the data structures describing the .dwarf_frame section to
- * make it easier to lookup CIE and FDE entries. Because the
- * .eh_frame section is packed as tightly as possible it is not
- * easy to lookup the FDE for a given PC, so we build a list of FDE
- * and CIE entries that make it easier.
+ * Parse the information in a .eh_frame section.
  */
-static int __init dwarf_unwinder_init(void)
+int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
+                        struct module *mod)
 {
         u32 entry_type;
         void *p, *entry;
@@ -891,29 +905,12 @@ static int __init dwarf_unwinder_init(void)
         unsigned long len;
         unsigned int c_entries, f_entries;
         unsigned char *end;
-        INIT_LIST_HEAD(&dwarf_cie_list);
-        INIT_LIST_HEAD(&dwarf_fde_list);
 
         c_entries = 0;
         f_entries = 0;
-        entry = &__start_eh_frame;
+        entry = eh_frame_start;
 
-        dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
-                        sizeof(struct dwarf_frame), 0, SLAB_PANIC, NULL);
-        dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
-                        sizeof(struct dwarf_reg), 0, SLAB_PANIC, NULL);
-
-        dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
-                                          mempool_alloc_slab,
-                                          mempool_free_slab,
-                                          dwarf_frame_cachep);
-
-        dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
-                                        mempool_alloc_slab,
-                                        mempool_free_slab,
-                                        dwarf_reg_cachep);
-
-        while ((char *)entry < __stop_eh_frame) {
+        while ((char *)entry < eh_frame_end) {
                 p = entry;
 
                 count = dwarf_entry_len(p, &len);
@@ -925,6 +922,7 @@ static int __init dwarf_unwinder_init(void)
                          * entry and move to the next one because 'len'
                          * tells us where our next entry is.
                          */
+                        err = -EINVAL;
                         goto out;
                 } else
                         p += count;
@@ -936,13 +934,14 @@ static int __init dwarf_unwinder_init(void)
                 p += 4;
 
                 if (entry_type == DW_EH_FRAME_CIE) {
-                        err = dwarf_parse_cie(entry, p, len, end);
+                        err = dwarf_parse_cie(entry, p, len, end, mod);
                         if (err < 0)
                                 goto out;
                         else
                                 c_entries++;
                 } else {
-                        err = dwarf_parse_fde(entry, entry_type, p, len, end);
+                        err = dwarf_parse_fde(entry, entry_type, p, len,
+                                              end, mod);
                         if (err < 0)
                                 goto out;
                         else
@@ -955,6 +954,92 @@ static int __init dwarf_unwinder_init(void)
         printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
                c_entries, f_entries);
 
+        return 0;
+
+out:
+        return err;
+}
+
+/**
+ * dwarf_module_unload - remove FDE/CIEs associated with @mod
+ * @mod: the module that is being unloaded
+ *
+ * Remove any FDEs and CIEs from the global lists that came from
+ * @mod's .eh_frame section because @mod is being unloaded.
+ */
+void dwarf_module_unload(struct module *mod)
+{
+        struct dwarf_fde *fde;
+        struct dwarf_cie *cie;
+        unsigned long flags;
+
+        spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+again_cie:
+        list_for_each_entry(cie, &dwarf_cie_list, link) {
+                if (cie->mod == mod)
+                        break;
+        }
+
+        if (&cie->link != &dwarf_cie_list) {
+                list_del(&cie->link);
+                kfree(cie);
+                goto again_cie;
+        }
+
+        spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+        spin_lock_irqsave(&dwarf_fde_lock, flags);
+
+again_fde:
+        list_for_each_entry(fde, &dwarf_fde_list, link) {
+                if (fde->mod == mod)
+                        break;
+        }
+
+        if (&fde->link != &dwarf_fde_list) {
+                list_del(&fde->link);
+                kfree(fde);
+                goto again_fde;
+        }
+
+        spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+}
+
+/**
+ * dwarf_unwinder_init - initialise the dwarf unwinder
+ *
+ * Build the data structures describing the .dwarf_frame section to
+ * make it easier to lookup CIE and FDE entries. Because the
+ * .eh_frame section is packed as tightly as possible it is not
+ * easy to lookup the FDE for a given PC, so we build a list of FDE
+ * and CIE entries that make it easier.
+ */
+static int __init dwarf_unwinder_init(void)
+{
+        int err;
+        INIT_LIST_HEAD(&dwarf_cie_list);
+        INIT_LIST_HEAD(&dwarf_fde_list);
+
+        dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
+                        sizeof(struct dwarf_frame), 0, SLAB_PANIC, NULL);
+        dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
+                        sizeof(struct dwarf_reg), 0, SLAB_PANIC, NULL);
+
+        dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
+                                          mempool_alloc_slab,
+                                          mempool_free_slab,
+                                          dwarf_frame_cachep);
+
+        dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
+                                        mempool_alloc_slab,
+                                        mempool_free_slab,
+                                        dwarf_reg_cachep);
+
+        err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
+        if (err)
+                goto out;
+
         err = unwinder_register(&dwarf_unwinder);
         if (err)
                 goto out;
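With dwarf_free_frame() factored out, the unwinder is documented (and used) as an iterative walk rather than a recursive one: the caller keeps asking for the next frame and frees the previous one itself, which is what the reworked dwarf_unwinder_dump() above does. A minimal caller-side sketch of that pattern; the walk_example() name and the stopping condition on return_addr are condensed from the dump loop above, not functions added by this patch.

#include <asm/dwarf.h>

/* Minimal sketch of the iterative unwind loop; walk_example() is an
 * illustrative name only. */
static void walk_example(unsigned long pc)
{
        struct dwarf_frame *frame = NULL, *prev = NULL;

        while (1) {
                /* Returns NULL when there is no DWARF info for 'pc'. */
                frame = dwarf_unwind_stack(pc, prev);

                /* The previous frame is finished with once its successor
                 * has been computed. */
                if (prev)
                        dwarf_free_frame(prev);
                prev = frame;

                if (!frame || !frame->return_addr)
                        break;

                pc = frame->return_addr;
        }

        /* Free the final frame, mirroring the tail of dwarf_unwinder_dump(). */
        if (frame)
                dwarf_free_frame(frame);
}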
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c
index c2efdcde266f..d297a148d16c 100644
--- a/arch/sh/kernel/module.c
+++ b/arch/sh/kernel/module.c
@@ -32,6 +32,7 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <asm/unaligned.h>
+#include <asm/dwarf.h>
 
 void *module_alloc(unsigned long size)
 {
@@ -145,10 +146,41 @@ int module_finalize(const Elf_Ehdr *hdr,
                     const Elf_Shdr *sechdrs,
                     struct module *me)
 {
+#ifdef CONFIG_DWARF_UNWINDER
+        unsigned int i, err;
+        unsigned long start, end;
+        char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+        start = end = 0;
+
+        for (i = 1; i < hdr->e_shnum; i++) {
+                /* Alloc bit cleared means "ignore it." */
+                if ((sechdrs[i].sh_flags & SHF_ALLOC)
+                    && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) {
+                        start = sechdrs[i].sh_addr;
+                        end = start + sechdrs[i].sh_size;
+                        break;
+                }
+        }
+
+        /* Did we find the .eh_frame section? */
+        if (i != hdr->e_shnum) {
+                err = dwarf_parse_section((char *)start, (char *)end, me);
+                if (err)
+                        printk(KERN_WARNING "%s: failed to parse DWARF info\n",
+                               me->name);
+        }
+
+#endif /* CONFIG_DWARF_UNWINDER */
+
         return module_bug_finalize(hdr, sechdrs, me);
 }
 
 void module_arch_cleanup(struct module *mod)
 {
         module_bug_cleanup(mod);
+
+#ifdef CONFIG_DWARF_UNWINDER
+        dwarf_module_unload(mod);
+#endif /* CONFIG_DWARF_UNWINDER */
 }
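Taken together the three files close the loop for modules: module_finalize() hands a module's .eh_frame to dwarf_parse_section() with the owning 'struct module *', and module_arch_cleanup() calls dwarf_module_unload(), which scans each global list under its spinlock and restarts the scan after every deletion, since list_for_each_entry() cannot safely continue past a list_del() of the current entry. A stripped-down sketch of that scan-delete-restart idiom on a generic list follows; locking is omitted, and the tagged_node type and remove_owned() name are illustrative, not from the patch.

#include <linux/list.h>
#include <linux/slab.h>

/* Illustrative node type: anything carrying a list_head and an owner tag. */
struct tagged_node {
        struct list_head link;
        void *owner;
};

/* Remove every node on 'head' owned by 'owner', restarting the walk after
 * each deletion, the same idiom dwarf_module_unload() uses (which also
 * holds the relevant spinlock around the walk). */
static void remove_owned(struct list_head *head, void *owner)
{
        struct tagged_node *node;

again:
        list_for_each_entry(node, head, link) {
                if (node->owner == owner)
                        break;
        }

        /* If the cursor stopped before reaching the head, a match was found. */
        if (&node->link != head) {
                list_del(&node->link);
                kfree(node);
                goto again;     /* iteration state is stale after list_del() */
        }
}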