Diffstat (limited to 'arch/sh/kernel/dwarf.c')

 -rw-r--r--  arch/sh/kernel/dwarf.c | 222
 1 file changed, 171 insertions(+), 51 deletions(-)
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index d76a23170dbb..3576b709f052 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -20,6 +20,7 @@
 #include <linux/list.h>
 #include <linux/mempool.h>
 #include <linux/mm.h>
+#include <linux/elf.h>
 #include <linux/ftrace.h>
 #include <asm/dwarf.h>
 #include <asm/unwinder.h>
@@ -530,7 +531,18 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 }
 
 /**
- * dwarf_unwind_stack - recursively unwind the stack
+ * dwarf_free_frame - free the memory allocated for @frame
+ * @frame: the frame to free
+ */
+void dwarf_free_frame(struct dwarf_frame *frame)
+{
+	dwarf_frame_free_regs(frame);
+	mempool_free(frame, dwarf_frame_pool);
+}
+
+/**
+ * dwarf_unwind_stack - unwind the stack
+ *
  * @pc: address of the function to unwind
  * @prev: struct dwarf_frame of the previous stackframe on the callstack
  *
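The new dwarf_free_frame() helper pairs dwarf_frame_free_regs() with returning the frame to dwarf_frame_pool, so every exit path releases a frame the same way. Purely as an illustration (the surrounding context and the 'pc' value are hypothetical, not part of this patch), a caller of the unwinder would now clean up with a single call:

	struct dwarf_frame *frame;

	frame = dwarf_unwind_stack(pc, NULL);	/* 'pc' is a hypothetical program counter */
	if (frame)
		dwarf_free_frame(frame);	/* frees the register list and returns the frame to dwarf_frame_pool */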
@@ -548,9 +560,9 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 	unsigned long addr;
 
 	/*
-	 * If this is the first invocation of this recursive function we
-	 * need get the contents of a physical register to get the CFA
-	 * in order to begin the virtual unwinding of the stack.
+	 * If we're starting at the top of the stack we need get the
+	 * contents of a physical register to get the CFA in order to
+	 * begin the virtual unwinding of the stack.
 	 *
 	 * NOTE: the return address is guaranteed to be setup by the
 	 * time this function makes its first function call.
@@ -593,9 +605,8 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 	fde = dwarf_lookup_fde(pc);
 	if (!fde) {
 		/*
-		 * This is our normal exit path - the one that stops the
-		 * recursion. There's two reasons why we might exit
-		 * here,
+		 * This is our normal exit path. There are two reasons
+		 * why we might exit here,
 		 *
 		 * a) pc has no asscociated DWARF frame info and so
 		 *    we don't know how to unwind this frame. This is
@@ -637,10 +648,10 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 
 	} else {
 		/*
-		 * Again, this is the first invocation of this
-		 * recurisve function. We need to physically
-		 * read the contents of a register in order to
-		 * get the Canonical Frame Address for this
+		 * Again, we're starting from the top of the
+		 * stack. We need to physically read
+		 * the contents of a register in order to get
+		 * the Canonical Frame Address for this
 		 * function.
 		 */
 		frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
@@ -670,13 +681,12 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 	return frame;
 
 bail:
-	dwarf_frame_free_regs(frame);
-	mempool_free(frame, dwarf_frame_pool);
+	dwarf_free_frame(frame);
 	return NULL;
 }
 
 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
-			   unsigned char *end)
+			   unsigned char *end, struct module *mod)
 {
 	struct dwarf_cie *cie;
 	unsigned long flags;
@@ -772,6 +782,8 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 	cie->initial_instructions = p;
 	cie->instructions_end = end;
 
+	cie->mod = mod;
+
 	/* Add to list */
 	spin_lock_irqsave(&dwarf_cie_lock, flags);
 	list_add_tail(&cie->link, &dwarf_cie_list);
@@ -782,7 +794,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 
 static int dwarf_parse_fde(void *entry, u32 entry_type,
 			   void *start, unsigned long len,
-			   unsigned char *end)
+			   unsigned char *end, struct module *mod)
 {
 	struct dwarf_fde *fde;
 	struct dwarf_cie *cie;
@@ -831,6 +843,8 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
 	fde->instructions = p;
 	fde->end = end;
 
+	fde->mod = mod;
+
 	/* Add to list. */
 	spin_lock_irqsave(&dwarf_fde_lock, flags);
 	list_add_tail(&fde->link, &dwarf_fde_list);
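Tagging each parsed CIE and FDE with the module it came from presumes a matching change to struct dwarf_cie and struct dwarf_fde in arch/sh/include/asm/dwarf.h, which is not part of this file's diff. The assumed shape is simply an extra member alongside the fields already referenced in these hunks (initial_instructions, instructions_end, link):

	struct dwarf_cie {
		/* ... existing members (initial_instructions, instructions_end, link, ...) ... */
		struct module *mod;	/* owning module, or NULL for the kernel's own .eh_frame */
	};
	/* struct dwarf_fde is assumed to gain the same 'mod' member. */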
@@ -854,10 +868,8 @@ static void dwarf_unwinder_dump(struct task_struct *task,
 	while (1) {
 		frame = dwarf_unwind_stack(return_addr, _frame);
 
-		if (_frame) {
-			dwarf_frame_free_regs(_frame);
-			mempool_free(_frame, dwarf_frame_pool);
-		}
+		if (_frame)
+			dwarf_free_frame(_frame);
 
 		_frame = frame;
 
@@ -867,6 +879,9 @@ static void dwarf_unwinder_dump(struct task_struct *task,
 		return_addr = frame->return_addr;
 		ops->address(data, return_addr, 1);
 	}
+
+	if (frame)
+		dwarf_free_frame(frame);
 }
 
 static struct unwinder dwarf_unwinder = {
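dwarf_unwinder_dump() now frees frames through the new helper, and it also frees the final frame after the loop, which the old code left behind once the walk terminated. The ownership pattern is: each iteration consumes the previous frame, and whoever holds the last frame when the loop exits must free it. Below is a minimal user-space analogue of that pattern only; struct frame, fake_unwind() and the numeric "addresses" are invented for the illustration, and plain malloc()/free() stand in for the dwarf_frame_pool mempool.

	#include <stdio.h>
	#include <stdlib.h>

	struct frame { unsigned long return_addr; };

	/* Stand-in for dwarf_unwind_stack(): returns a freshly allocated frame, or NULL. */
	static struct frame *fake_unwind(unsigned long pc, struct frame *prev)
	{
		struct frame *f;

		(void)prev;
		if (pc == 0)
			return NULL;

		f = malloc(sizeof(*f));
		if (!f)
			return NULL;
		f->return_addr = pc - 1;	/* pretend each caller sits one "address" lower */
		return f;
	}

	int main(void)
	{
		struct frame *frame, *_frame = NULL;
		unsigned long return_addr = 5;

		while (1) {
			frame = fake_unwind(return_addr, _frame);

			if (_frame)
				free(_frame);		/* the previous frame is no longer needed */

			_frame = frame;

			if (!frame || !frame->return_addr)
				break;

			return_addr = frame->return_addr;
			printf("return address: %lu\n", return_addr);
		}

		if (frame)				/* the last frame is freed after the loop, mirroring the fix */
			free(frame);

		return 0;
	}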
@@ -896,48 +911,28 @@ static void dwarf_unwinder_cleanup(void)
 }
 
 /**
- * dwarf_unwinder_init - initialise the dwarf unwinder
+ * dwarf_parse_section - parse DWARF section
+ * @eh_frame_start: start address of the .eh_frame section
+ * @eh_frame_end: end address of the .eh_frame section
+ * @mod: the kernel module containing the .eh_frame section
  *
- * Build the data structures describing the .dwarf_frame section to
- * make it easier to lookup CIE and FDE entries. Because the
- * .eh_frame section is packed as tightly as possible it is not
- * easy to lookup the FDE for a given PC, so we build a list of FDE
- * and CIE entries that make it easier.
+ * Parse the information in a .eh_frame section.
  */
-static int __init dwarf_unwinder_init(void)
+static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
+			       struct module *mod)
 {
 	u32 entry_type;
 	void *p, *entry;
 	int count, err = 0;
-	unsigned long len;
+	unsigned long len = 0;
 	unsigned int c_entries, f_entries;
 	unsigned char *end;
-	INIT_LIST_HEAD(&dwarf_cie_list);
-	INIT_LIST_HEAD(&dwarf_fde_list);
 
 	c_entries = 0;
 	f_entries = 0;
-	entry = &__start_eh_frame;
-
-	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
-			sizeof(struct dwarf_frame), 0,
-			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
-
-	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
-			sizeof(struct dwarf_reg), 0,
-			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+	entry = eh_frame_start;
 
-	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
-					  mempool_alloc_slab,
-					  mempool_free_slab,
-					  dwarf_frame_cachep);
-
-	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
-					 mempool_alloc_slab,
-					 mempool_free_slab,
-					 dwarf_reg_cachep);
-
-	while ((char *)entry < __stop_eh_frame) {
+	while ((char *)entry < eh_frame_end) {
 		p = entry;
 
 		count = dwarf_entry_len(p, &len);
@@ -949,6 +944,7 @@ static int __init dwarf_unwinder_init(void)
 			 * entry and move to the next one because 'len'
 			 * tells us where our next entry is.
 			 */
+			err = -EINVAL;
 			goto out;
 		} else
 			p += count;
@@ -960,13 +956,14 @@ static int __init dwarf_unwinder_init(void)
 		p += 4;
 
 		if (entry_type == DW_EH_FRAME_CIE) {
-			err = dwarf_parse_cie(entry, p, len, end);
+			err = dwarf_parse_cie(entry, p, len, end, mod);
 			if (err < 0)
 				goto out;
 			else
 				c_entries++;
 		} else {
-			err = dwarf_parse_fde(entry, entry_type, p, len, end);
+			err = dwarf_parse_fde(entry, entry_type, p, len,
+					      end, mod);
 			if (err < 0)
 				goto out;
 			else
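The parsing loop itself keeps its structure: each .eh_frame record starts with an initial-length field, followed by a 4-byte field that is zero for a CIE and a CIE pointer for an FDE, and 'len' tells the walker where the next record begins. These hunks just thread 'mod' through to the CIE/FDE parsers, and the err = -EINVAL addition makes a bogus length field propagate as a real error from dwarf_parse_section() instead of leaving err at 0. The following is a self-contained user-space sketch of walking such length-prefixed records; parse_records(), the record layout and the sample buffer are simplified inventions (a little-endian host is assumed, and the DWARF extended-length escape that dwarf_entry_len() has to cope with is ignored).

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define FAKE_CIE_ID 0	/* in .eh_frame a CIE is identified by a zero ID field */

	/* Walk a buffer of records laid out as: u32 length, u32 id, 'length - 4' payload bytes. */
	static int parse_records(const unsigned char *start, const unsigned char *end)
	{
		const unsigned char *p = start;
		unsigned int cies = 0, fdes = 0;

		while (p + 8 <= end) {
			uint32_t len, id;

			memcpy(&len, p, sizeof(len));
			if (len == 0 || p + 4 + len > end)
				return -1;	/* corrupt length: bail out, like the -EINVAL path */

			memcpy(&id, p + 4, sizeof(id));
			if (id == FAKE_CIE_ID)
				cies++;
			else
				fdes++;

			p += 4 + len;	/* the length field does not count itself */
		}

		printf("read %u CIEs, %u FDEs\n", cies, fdes);
		return 0;
	}

	int main(void)
	{
		/* Two fake records: a "CIE" (id 0) and an "FDE" (id 1), each 4-byte id + 4 payload bytes. */
		unsigned char buf[] = {
			8, 0, 0, 0,  0, 0, 0, 0,  0xaa, 0xbb, 0xcc, 0xdd,
			8, 0, 0, 0,  1, 0, 0, 0,  0x11, 0x22, 0x33, 0x44,
		};

		return parse_records(buf, buf + sizeof(buf)) ? 1 : 0;
	}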
@@ -979,6 +976,129 @@ static int __init dwarf_unwinder_init(void)
 	printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
 	       c_entries, f_entries);
 
+	return 0;
+
+out:
+	return err;
+}
+
+#ifdef CONFIG_MODULES
+int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+			  struct module *me)
+{
+	unsigned int i, err;
+	unsigned long start, end;
+	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	start = end = 0;
+
+	for (i = 1; i < hdr->e_shnum; i++) {
+		/* Alloc bit cleared means "ignore it." */
+		if ((sechdrs[i].sh_flags & SHF_ALLOC)
+		    && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) {
+			start = sechdrs[i].sh_addr;
+			end = start + sechdrs[i].sh_size;
+			break;
+		}
+	}
+
+	/* Did we find the .eh_frame section? */
+	if (i != hdr->e_shnum) {
+		err = dwarf_parse_section((char *)start, (char *)end, me);
+		if (err) {
+			printk(KERN_WARNING "%s: failed to parse DWARF info\n",
+			       me->name);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * module_dwarf_cleanup - remove FDE/CIEs associated with @mod
+ * @mod: the module that is being unloaded
+ *
+ * Remove any FDEs and CIEs from the global lists that came from
+ * @mod's .eh_frame section because @mod is being unloaded.
+ */
+void module_dwarf_cleanup(struct module *mod)
+{
+	struct dwarf_fde *fde;
+	struct dwarf_cie *cie;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+again_cie:
+	list_for_each_entry(cie, &dwarf_cie_list, link) {
+		if (cie->mod == mod)
+			break;
+	}
+
+	if (&cie->link != &dwarf_cie_list) {
+		list_del(&cie->link);
+		kfree(cie);
+		goto again_cie;
+	}
+
+	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+	spin_lock_irqsave(&dwarf_fde_lock, flags);
+
+again_fde:
+	list_for_each_entry(fde, &dwarf_fde_list, link) {
+		if (fde->mod == mod)
+			break;
+	}
+
+	if (&fde->link != &dwarf_fde_list) {
+		list_del(&fde->link);
+		kfree(fde);
+		goto again_fde;
+	}
+
+	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+}
+#endif /* CONFIG_MODULES */
+
+/**
+ * dwarf_unwinder_init - initialise the dwarf unwinder
+ *
+ * Build the data structures describing the .dwarf_frame section to
+ * make it easier to lookup CIE and FDE entries. Because the
+ * .eh_frame section is packed as tightly as possible it is not
+ * easy to lookup the FDE for a given PC, so we build a list of FDE
+ * and CIE entries that make it easier.
+ */
+static int __init dwarf_unwinder_init(void)
+{
+	int err;
+	INIT_LIST_HEAD(&dwarf_cie_list);
+	INIT_LIST_HEAD(&dwarf_fde_list);
+
+	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
+			sizeof(struct dwarf_frame), 0,
+			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
+			sizeof(struct dwarf_reg), 0,
+			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
+					  mempool_alloc_slab,
+					  mempool_free_slab,
+					  dwarf_frame_cachep);
+
+	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
+					 mempool_alloc_slab,
+					 mempool_free_slab,
+					 dwarf_reg_cachep);
+
+	err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
+	if (err)
+		goto out;
+
 	err = unwinder_register(&dwarf_unwinder);
 	if (err)
 		goto out;
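The module path mirrors the boot path: module_dwarf_finalize() scans the module's section headers for an allocated .eh_frame section and feeds it to dwarf_parse_section() tagged with the owning module, while module_dwarf_cleanup() restarts its list walk after every deletion, a simple (if conservative) alternative to list_for_each_entry_safe() that also re-takes the search from the list head each time. Presumably these hooks are wired into the SH module loader (module_finalize()/module_arch_cleanup()), but that wiring is outside this file. The section scan itself is ordinary ELF section-header iteration; below is a stand-alone user-space equivalent that looks for .eh_frame in an object file on disk. It is only an analogue of the scan above, assuming glibc's elf.h and a 64-bit ELF input; note that it reads section contents via file offsets, whereas the kernel version uses sh_addr because the module's sections are already loaded in memory.

	#include <elf.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <sys/stat.h>
	#include <unistd.h>

	/* Find the allocated .eh_frame section of a 64-bit ELF file, like the sh_flags/name scan above. */
	int main(int argc, char **argv)
	{
		struct stat st;
		int fd;
		void *map;
		Elf64_Ehdr *hdr;
		Elf64_Shdr *sechdrs;
		char *secstrings;
		unsigned int i;

		if (argc != 2) {
			fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
			return 1;
		}

		fd = open(argv[1], O_RDONLY);
		if (fd < 0 || fstat(fd, &st) < 0)
			return 1;

		map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
		if (map == MAP_FAILED)
			return 1;

		hdr = map;
		sechdrs = (Elf64_Shdr *)((char *)map + hdr->e_shoff);
		secstrings = (char *)map + sechdrs[hdr->e_shstrndx].sh_offset;

		for (i = 1; i < hdr->e_shnum; i++) {
			/* Alloc bit cleared means "ignore it", as in module_dwarf_finalize(). */
			if ((sechdrs[i].sh_flags & SHF_ALLOC) &&
			    !strcmp(secstrings + sechdrs[i].sh_name, ".eh_frame")) {
				printf(".eh_frame: %lu bytes at section index %u\n",
				       (unsigned long)sechdrs[i].sh_size, i);
				break;
			}
		}

		if (i == hdr->e_shnum)
			fprintf(stderr, "no allocated .eh_frame section found\n");

		munmap(map, st.st_size);
		close(fd);
		return 0;
	}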
