author     Paul Mundt <lethal@linux-sh.org>    2009-10-11 19:50:07 -0400
committer  Paul Mundt <lethal@linux-sh.org>    2009-10-11 19:50:07 -0400
commit     8ec006c58775869175edee3d23f4525b6df2935a (patch)
tree       e70b8d0d191cd97276aa4b370e055485a6a98010 /arch/sh/kernel/dwarf.c
parent     3d4e0cfb3372ee7754f743ab90944540cef4ecc6 (diff)
parent     5ab78ff693d09a6ffc7ca80ad600b2f5feb89d7f (diff)
Merge branch 'sh/dwarf-unwinder'
Conflicts:
arch/sh/kernel/dwarf.c
Diffstat (limited to 'arch/sh/kernel/dwarf.c')
-rw-r--r--  arch/sh/kernel/dwarf.c  185
1 file changed, 135 insertions(+), 50 deletions(-)
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 03b3616c80a5..c274039e9c8d 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -529,7 +529,18 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 }
 
 /**
- * dwarf_unwind_stack - recursively unwind the stack
+ * dwarf_free_frame - free the memory allocated for @frame
+ * @frame: the frame to free
+ */
+void dwarf_free_frame(struct dwarf_frame *frame)
+{
+        dwarf_frame_free_regs(frame);
+        mempool_free(frame, dwarf_frame_pool);
+}
+
+/**
+ * dwarf_unwind_stack - unwind the stack
+ *
  * @pc: address of the function to unwind
  * @prev: struct dwarf_frame of the previous stackframe on the callstack
  *
@@ -547,9 +558,9 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
         unsigned long addr;
 
         /*
-         * If this is the first invocation of this recursive function we
-         * need get the contents of a physical register to get the CFA
-         * in order to begin the virtual unwinding of the stack.
+         * If we're starting at the top of the stack we need get the
+         * contents of a physical register to get the CFA in order to
+         * begin the virtual unwinding of the stack.
          *
         * NOTE: the return address is guaranteed to be setup by the
         * time this function makes its first function call.
@@ -571,9 +582,8 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
         fde = dwarf_lookup_fde(pc);
         if (!fde) {
                 /*
-                 * This is our normal exit path - the one that stops the
-                 * recursion. There's two reasons why we might exit
-                 * here,
+                 * This is our normal exit path. There are two reasons
+                 * why we might exit here,
                  *
                  * a) pc has no asscociated DWARF frame info and so
                  *    we don't know how to unwind this frame. This is
@@ -615,10 +625,10 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 
         } else {
                 /*
-                 * Again, this is the first invocation of this
-                 * recurisve function. We need to physically
-                 * read the contents of a register in order to
-                 * get the Canonical Frame Address for this
+                 * Again, we're starting from the top of the
+                 * stack. We need to physically read
+                 * the contents of a register in order to get
+                 * the Canonical Frame Address for this
                  * function.
                  */
                 frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
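The comment above refers to physically reading a CPU register when the unwind starts at the top of the stack. dwarf_read_arch_reg() itself lives in the SH arch headers and is untouched by this diff; purely for illustration, a helper of that shape could look like the following (the register numbers and SH mnemonics here are assumptions, not taken from this patch):

    /* Illustration only: map a DWARF register number onto a live CPU
     * register so the initial CFA can be computed.
     */
    static inline unsigned long example_read_arch_reg(unsigned int reg)
    {
            unsigned long value = 0;

            switch (reg) {
            case 14:        /* frame pointer, by assumption */
                    __asm__ __volatile__("mov r14, %0" : "=r" (value));
                    break;
            case 15:        /* stack pointer, by assumption */
                    __asm__ __volatile__("mov r15, %0" : "=r" (value));
                    break;
            }

            return value;
    }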
@@ -648,13 +658,12 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
         return frame;
 
 bail:
-        dwarf_frame_free_regs(frame);
-        mempool_free(frame, dwarf_frame_pool);
+        dwarf_free_frame(frame);
         return NULL;
 }
 
 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
-                           unsigned char *end)
+                           unsigned char *end, struct module *mod)
 {
         struct dwarf_cie *cie;
         unsigned long flags;
@@ -750,6 +759,8 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
         cie->initial_instructions = p;
         cie->instructions_end = end;
 
+        cie->mod = mod;
+
         /* Add to list */
         spin_lock_irqsave(&dwarf_cie_lock, flags);
         list_add_tail(&cie->link, &dwarf_cie_list);
@@ -760,7 +771,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 
 static int dwarf_parse_fde(void *entry, u32 entry_type,
                            void *start, unsigned long len,
-                           unsigned char *end)
+                           unsigned char *end, struct module *mod)
 {
         struct dwarf_fde *fde;
         struct dwarf_cie *cie;
@@ -809,6 +820,8 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
         fde->instructions = p;
         fde->end = end;
 
+        fde->mod = mod;
+
         /* Add to list. */
         spin_lock_irqsave(&dwarf_fde_lock, flags);
         list_add_tail(&fde->link, &dwarf_fde_list);
@@ -832,10 +845,8 @@ static void dwarf_unwinder_dump(struct task_struct *task,
         while (1) {
                 frame = dwarf_unwind_stack(return_addr, _frame);
 
-                if (_frame) {
-                        dwarf_frame_free_regs(_frame);
-                        mempool_free(_frame, dwarf_frame_pool);
-                }
+                if (_frame)
+                        dwarf_free_frame(_frame);
 
                 _frame = frame;
 
@@ -845,6 +856,9 @@ static void dwarf_unwinder_dump(struct task_struct *task,
                 return_addr = frame->return_addr;
                 ops->address(data, return_addr, 1);
         }
+
+        if (frame)
+                dwarf_free_frame(frame);
 }
 
 static struct unwinder dwarf_unwinder = {
@@ -874,15 +888,15 @@ static void dwarf_unwinder_cleanup(void)
 }
 
 /**
- * dwarf_unwinder_init - initialise the dwarf unwinder
+ * dwarf_parse_section - parse DWARF section
+ * @eh_frame_start: start address of the .eh_frame section
+ * @eh_frame_end: end address of the .eh_frame section
+ * @mod: the kernel module containing the .eh_frame section
  *
- * Build the data structures describing the .dwarf_frame section to
- * make it easier to lookup CIE and FDE entries. Because the
- * .eh_frame section is packed as tightly as possible it is not
- * easy to lookup the FDE for a given PC, so we build a list of FDE
- * and CIE entries that make it easier.
+ * Parse the information in a .eh_frame section.
  */
-static int __init dwarf_unwinder_init(void)
+int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
+                        struct module *mod)
 {
         u32 entry_type;
         void *p, *entry;
@@ -890,32 +904,12 @@ static int __init dwarf_unwinder_init(void)
         unsigned long len;
         unsigned int c_entries, f_entries;
         unsigned char *end;
-        INIT_LIST_HEAD(&dwarf_cie_list);
-        INIT_LIST_HEAD(&dwarf_fde_list);
 
         c_entries = 0;
         f_entries = 0;
-        entry = &__start_eh_frame;
-
-        dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
-                        sizeof(struct dwarf_frame), 0,
-                        SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
-
-        dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
-                        sizeof(struct dwarf_reg), 0,
-                        SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
-
-        dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
-                                          mempool_alloc_slab,
-                                          mempool_free_slab,
-                                          dwarf_frame_cachep);
+        entry = eh_frame_start;
 
-        dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
-                                        mempool_alloc_slab,
-                                        mempool_free_slab,
-                                        dwarf_reg_cachep);
-
-        while ((char *)entry < __stop_eh_frame) {
+        while ((char *)entry < eh_frame_end) {
                 p = entry;
 
                 count = dwarf_entry_len(p, &len);
@@ -927,6 +921,7 @@ static int __init dwarf_unwinder_init(void)
                          * entry and move to the next one because 'len'
                          * tells us where our next entry is.
                          */
+                        err = -EINVAL;
                         goto out;
                 } else
                         p += count;
@@ -938,13 +933,14 @@ static int __init dwarf_unwinder_init(void)
                 p += 4;
 
                 if (entry_type == DW_EH_FRAME_CIE) {
-                        err = dwarf_parse_cie(entry, p, len, end);
+                        err = dwarf_parse_cie(entry, p, len, end, mod);
                         if (err < 0)
                                 goto out;
                         else
                                 c_entries++;
                 } else {
-                        err = dwarf_parse_fde(entry, entry_type, p, len, end);
+                        err = dwarf_parse_fde(entry, entry_type, p, len,
+                                              end, mod);
                         if (err < 0)
                                 goto out;
                         else
@@ -957,6 +953,95 @@ static int __init dwarf_unwinder_init(void)
         printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
                c_entries, f_entries);
 
+        return 0;
+
+out:
+        return err;
+}
+
+/**
+ * dwarf_module_unload - remove FDE/CIEs associated with @mod
+ * @mod: the module that is being unloaded
+ *
+ * Remove any FDEs and CIEs from the global lists that came from
+ * @mod's .eh_frame section because @mod is being unloaded.
+ */
+void dwarf_module_unload(struct module *mod)
+{
+        struct dwarf_fde *fde;
+        struct dwarf_cie *cie;
+        unsigned long flags;
+
+        spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+again_cie:
+        list_for_each_entry(cie, &dwarf_cie_list, link) {
+                if (cie->mod == mod)
+                        break;
+        }
+
+        if (&cie->link != &dwarf_cie_list) {
+                list_del(&cie->link);
+                kfree(cie);
+                goto again_cie;
+        }
+
+        spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+        spin_lock_irqsave(&dwarf_fde_lock, flags);
+
+again_fde:
+        list_for_each_entry(fde, &dwarf_fde_list, link) {
+                if (fde->mod == mod)
+                        break;
+        }
+
+        if (&fde->link != &dwarf_fde_list) {
+                list_del(&fde->link);
+                kfree(fde);
+                goto again_fde;
+        }
+
+        spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+}
+
+/**
+ * dwarf_unwinder_init - initialise the dwarf unwinder
+ *
+ * Build the data structures describing the .dwarf_frame section to
+ * make it easier to lookup CIE and FDE entries. Because the
+ * .eh_frame section is packed as tightly as possible it is not
+ * easy to lookup the FDE for a given PC, so we build a list of FDE
+ * and CIE entries that make it easier.
+ */
+static int __init dwarf_unwinder_init(void)
+{
+        int err;
+        INIT_LIST_HEAD(&dwarf_cie_list);
+        INIT_LIST_HEAD(&dwarf_fde_list);
+
+        dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
+                        sizeof(struct dwarf_frame), 0,
+                        SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+        dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
+                        sizeof(struct dwarf_reg), 0,
+                        SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+        dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
+                                          mempool_alloc_slab,
+                                          mempool_free_slab,
+                                          dwarf_frame_cachep);
+
+        dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
+                                        mempool_alloc_slab,
+                                        mempool_free_slab,
+                                        dwarf_reg_cachep);
+
+        err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
+        if (err)
+                goto out;
+
         err = unwinder_register(&dwarf_unwinder);
         if (err)
                 goto out;
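Exporting dwarf_parse_section() and dwarf_module_unload() lets the module loader register unwind information when a module carrying an .eh_frame section is loaded, and purge it again on unload. The hooks themselves live outside this file; the following is only a hypothetical sketch of that wiring (the function names, the section walk, and the headers assumed here are illustrative, not part of this diff):

    /* Hypothetical module hooks -- illustration only.
     * Assumes <linux/elf.h>, <linux/string.h>, <linux/module.h> and
     * <asm/dwarf.h>.
     */
    static int example_module_finalize(const Elf_Ehdr *hdr,
                                       const Elf_Shdr *sechdrs,
                                       struct module *me)
    {
            char *secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
            unsigned int i;

            for (i = 1; i < hdr->e_shnum; i++) {
                    if (strcmp(secstrings + sechdrs[i].sh_name, ".eh_frame"))
                            continue;

                    /* Parse this module's frame info and tag it with @me,
                     * so dwarf_module_unload(me) can find it later.
                     */
                    return dwarf_parse_section((char *)sechdrs[i].sh_addr,
                                               (char *)sechdrs[i].sh_addr +
                                                       sechdrs[i].sh_size,
                                               me);
            }

            return 0;
    }

    static void example_module_cleanup(struct module *me)
    {
            /* Drop every CIE and FDE that was tagged with @me above. */
            dwarf_module_unload(me);
    }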