Diffstat (limited to 'arch/sh/kernel/dwarf.c')
 arch/sh/kernel/dwarf.c | 369 +++++++++++++++++++++++++++++++++++++---------
 1 file changed, 288 insertions(+), 81 deletions(-)
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index d76a23170dbb..a8234b2010d1 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -20,7 +20,9 @@
 #include <linux/list.h>
 #include <linux/mempool.h>
 #include <linux/mm.h>
+#include <linux/elf.h>
 #include <linux/ftrace.h>
+#include <linux/slab.h>
 #include <asm/dwarf.h>
 #include <asm/unwinder.h>
 #include <asm/sections.h>
@@ -38,10 +40,10 @@ static mempool_t *dwarf_frame_pool;
 static struct kmem_cache *dwarf_reg_cachep;
 static mempool_t *dwarf_reg_pool;
 
-static LIST_HEAD(dwarf_cie_list);
+static struct rb_root cie_root;
 static DEFINE_SPINLOCK(dwarf_cie_lock);
 
-static LIST_HEAD(dwarf_fde_list);
+static struct rb_root fde_root;
 static DEFINE_SPINLOCK(dwarf_fde_lock);
 
 static struct dwarf_cie *cached_cie;
@@ -300,7 +302,8 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len)
  */
 static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
 {
-	struct dwarf_cie *cie;
+	struct rb_node **rb_node = &cie_root.rb_node;
+	struct dwarf_cie *cie = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dwarf_cie_lock, flags);
@@ -314,16 +317,24 @@ static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
 		goto out;
 	}
 
-	list_for_each_entry(cie, &dwarf_cie_list, link) {
-		if (cie->cie_pointer == cie_ptr) {
-			cached_cie = cie;
-			break;
+	while (*rb_node) {
+		struct dwarf_cie *cie_tmp;
+
+		cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
+		BUG_ON(!cie_tmp);
+
+		if (cie_ptr == cie_tmp->cie_pointer) {
+			cie = cie_tmp;
+			cached_cie = cie_tmp;
+			goto out;
+		} else {
+			if (cie_ptr < cie_tmp->cie_pointer)
+				rb_node = &(*rb_node)->rb_left;
+			else
+				rb_node = &(*rb_node)->rb_right;
 		}
 	}
 
-	/* Couldn't find the entry in the list. */
-	if (&cie->link == &dwarf_cie_list)
-		cie = NULL;
 out:
 	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
 	return cie;
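
The loop above is the stock kernel rbtree descent, specialised for an exact key match on cie_pointer. For illustration only (not part of the patch), here is a self-contained user-space sketch of the same shape; a plain, unbalanced BST stands in for lib/rbtree and the struct is a hypothetical stand-in:

#include <stdio.h>

/* Toy stand-in for struct dwarf_cie: only the search key matters here. */
struct cie {
	unsigned long cie_pointer;	/* the search key */
	struct cie *left, *right;	/* lib/rbtree would keep these balanced */
};

/* Same shape as the descent in dwarf_lookup_cie(): exact match wins,
 * smaller keys go left, larger keys go right. */
static struct cie *cie_lookup(struct cie *root, unsigned long cie_ptr)
{
	while (root) {
		if (cie_ptr == root->cie_pointer)
			return root;
		root = (cie_ptr < root->cie_pointer) ? root->left : root->right;
	}
	return NULL;	/* no CIE with that pointer */
}

int main(void)
{
	struct cie a = { 16, NULL, NULL }, b = { 48, NULL, NULL };
	struct cie root = { 32, &a, &b };

	printf("48 -> %p\n", (void *)cie_lookup(&root, 48));
	printf("99 -> %p\n", (void *)cie_lookup(&root, 99));
	return 0;
}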
@@ -335,25 +346,34 @@ out:
  */
 struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
 {
-	struct dwarf_fde *fde;
+	struct rb_node **rb_node = &fde_root.rb_node;
+	struct dwarf_fde *fde = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dwarf_fde_lock, flags);
 
-	list_for_each_entry(fde, &dwarf_fde_list, link) {
-		unsigned long start, end;
+	while (*rb_node) {
+		struct dwarf_fde *fde_tmp;
+		unsigned long tmp_start, tmp_end;
 
-		start = fde->initial_location;
-		end = fde->initial_location + fde->address_range;
+		fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
+		BUG_ON(!fde_tmp);
 
-		if (pc >= start && pc < end)
-			break;
-	}
+		tmp_start = fde_tmp->initial_location;
+		tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
 
-	/* Couldn't find the entry in the list. */
-	if (&fde->link == &dwarf_fde_list)
-		fde = NULL;
+		if (pc < tmp_start) {
+			rb_node = &(*rb_node)->rb_left;
+		} else {
+			if (pc < tmp_end) {
+				fde = fde_tmp;
+				goto out;
+			} else
+				rb_node = &(*rb_node)->rb_right;
+		}
+	}
 
+out:
 	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
 
 	return fde;
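
FDEs cover non-overlapping half-open ranges [initial_location, initial_location + address_range), so the same descent works with a three-way range test: left when pc is below the range, a hit when it falls inside, right otherwise. A minimal stand-alone sketch of that test (hypothetical struct, plain BST in place of lib/rbtree):

#include <stdio.h>

/* Toy stand-in for struct dwarf_fde: a non-overlapping [start, end) range. */
struct fde {
	unsigned long start, end;	/* end = initial_location + address_range */
	struct fde *left, *right;
};

/* Mirrors dwarf_lookup_fde(): because the ranges never overlap, pc below
 * the range means "look left", pc inside [start, end) is a hit, and
 * anything else means "look right". */
static struct fde *fde_lookup(struct fde *root, unsigned long pc)
{
	while (root) {
		if (pc < root->start)
			root = root->left;
		else if (pc < root->end)
			return root;
		else
			root = root->right;
	}
	return NULL;	/* pc has no DWARF frame info */
}

int main(void)
{
	struct fde lo = { 0x100, 0x180, NULL, NULL };
	struct fde hi = { 0x300, 0x340, NULL, NULL };
	struct fde root = { 0x200, 0x280, &lo, &hi };

	printf("0x240 -> %p\n", (void *)fde_lookup(&root, 0x240));
	printf("0x2f0 -> %p\n", (void *)fde_lookup(&root, 0x2f0));
	return 0;
}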
@@ -530,7 +550,20 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 }
 
 /**
- * dwarf_unwind_stack - recursively unwind the stack
+ * dwarf_free_frame - free the memory allocated for @frame
+ * @frame: the frame to free
+ */
+void dwarf_free_frame(struct dwarf_frame *frame)
+{
+	dwarf_frame_free_regs(frame);
+	mempool_free(frame, dwarf_frame_pool);
+}
+
+extern void ret_from_irq(void);
+
+/**
+ * dwarf_unwind_stack - unwind the stack
+ *
  * @pc: address of the function to unwind
  * @prev: struct dwarf_frame of the previous stackframe on the callstack
  *
@@ -538,8 +571,8 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
  * on the callstack. Each of the lower (older) stack frames are
  * linked via the "prev" member.
  */
-struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
-					struct dwarf_frame *prev)
+struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
+				       struct dwarf_frame *prev)
 {
 	struct dwarf_frame *frame;
 	struct dwarf_cie *cie;
@@ -548,9 +581,9 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 	unsigned long addr;
 
 	/*
-	 * If this is the first invocation of this recursive function we
-	 * need get the contents of a physical register to get the CFA
-	 * in order to begin the virtual unwinding of the stack.
+	 * If we're starting at the top of the stack we need to get the
+	 * contents of a physical register to get the CFA in order to
+	 * begin the virtual unwinding of the stack.
 	 *
 	 * NOTE: the return address is guaranteed to be setup by the
 	 * time this function makes its first function call.
@@ -593,9 +626,8 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 	fde = dwarf_lookup_fde(pc);
 	if (!fde) {
 		/*
-		 * This is our normal exit path - the one that stops the
-		 * recursion. There's two reasons why we might exit
-		 * here,
+		 * This is our normal exit path. There are two reasons
+		 * why we might exit here,
 		 *
 		 * a) pc has no associated DWARF frame info and so
 		 * we don't know how to unwind this frame. This is
@@ -637,10 +669,10 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 
 	} else {
 		/*
-		 * Again, this is the first invocation of this
-		 * recurisve function. We need to physically
-		 * read the contents of a register in order to
-		 * get the Canonical Frame Address for this
+		 * Again, we're starting from the top of the
+		 * stack. We need to physically read
+		 * the contents of a register in order to get
+		 * the Canonical Frame Address for this
 		 * function.
 		 */
 		frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
@@ -667,17 +699,36 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 		addr = frame->cfa + reg->addr;
 		frame->return_addr = __raw_readl(addr);
 
+	/*
+	 * Ah, the joys of unwinding through interrupts.
+	 *
+	 * Interrupts are tricky - the DWARF info needs to be _really_
+	 * accurate and unfortunately I'm seeing a lot of bogus DWARF
+	 * info. For example, I've seen interrupts occur in epilogues
+	 * just after the frame pointer (r14) had been restored. The
+	 * problem was that the DWARF info claimed that the CFA could be
+	 * reached by using the value of the frame pointer before it was
+	 * restored.
+	 *
+	 * So until the compiler can be trusted to produce reliable
+	 * DWARF info when it really matters, let's stop unwinding once
+	 * we've calculated the function that was interrupted.
+	 */
+	if (prev && prev->pc == (unsigned long)ret_from_irq)
+		frame->return_addr = 0;
+
 	return frame;
 
 bail:
-	dwarf_frame_free_regs(frame);
-	mempool_free(frame, dwarf_frame_pool);
+	dwarf_free_frame(frame);
 	return NULL;
 }
 
 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
-			   unsigned char *end)
+			   unsigned char *end, struct module *mod)
 {
+	struct rb_node **rb_node = &cie_root.rb_node;
+	struct rb_node *parent = *rb_node;
 	struct dwarf_cie *cie;
 	unsigned long flags;
 	int count;
@@ -774,7 +825,28 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 
 	/* Add to list */
 	spin_lock_irqsave(&dwarf_cie_lock, flags);
-	list_add_tail(&cie->link, &dwarf_cie_list);
+
+	while (*rb_node) {
+		struct dwarf_cie *cie_tmp;
+
+		cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
+
+		parent = *rb_node;
+
+		if (cie->cie_pointer < cie_tmp->cie_pointer)
+			rb_node = &parent->rb_left;
+		else if (cie->cie_pointer >= cie_tmp->cie_pointer)
+			rb_node = &parent->rb_right;
+		else
+			WARN_ON(1);
+	}
+
+	rb_link_node(&cie->node, parent, rb_node);
+	rb_insert_color(&cie->node, &cie_root);
+
+	if (mod != NULL)
+		list_add_tail(&cie->link, &mod->arch.cie_list);
+
 	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
 
 	return 0;
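
Insertion here is the usual two-phase kernel idiom: descend while remembering the parent and the child slot where the new node belongs, then rb_link_node() splices the node into that slot and rb_insert_color() rebalances. (Note that the < and >= tests above are exhaustive, so a duplicate cie_pointer simply descends right and the WARN_ON(1) arm cannot fire.) A stand-alone sketch of the same shape, with a toy BST and the rebalancing phase omitted:

#include <stdio.h>

struct cie {
	unsigned long cie_pointer;
	struct cie *left, *right;
};

/* Two-phase insert, as above: walk down remembering the child slot where
 * the new node belongs, then link it in.  lib/rbtree splits the second
 * phase into rb_link_node() plus rb_insert_color(); the red-black
 * rebalancing done by the latter is omitted in this toy BST. */
static void cie_insert(struct cie **slot, struct cie *new)
{
	while (*slot) {
		struct cie *parent = *slot;

		if (new->cie_pointer < parent->cie_pointer)
			slot = &parent->left;
		else
			slot = &parent->right;	/* duplicates also go right */
	}
	*slot = new;	/* the rb_link_node() step */
}

int main(void)
{
	struct cie *root = NULL;
	struct cie a = { 32, NULL, NULL }, b = { 16, NULL, NULL };

	cie_insert(&root, &a);
	cie_insert(&root, &b);	/* ends up at root->left */
	printf("root=%lu left=%lu\n", root->cie_pointer,
	       root->left->cie_pointer);
	return 0;
}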
@@ -782,8 +854,10 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 
 static int dwarf_parse_fde(void *entry, u32 entry_type,
 			   void *start, unsigned long len,
-			   unsigned char *end)
+			   unsigned char *end, struct module *mod)
 {
+	struct rb_node **rb_node = &fde_root.rb_node;
+	struct rb_node *parent = *rb_node;
 	struct dwarf_fde *fde;
 	struct dwarf_cie *cie;
 	unsigned long flags;
@@ -833,7 +907,36 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
 
 	/* Add to list. */
 	spin_lock_irqsave(&dwarf_fde_lock, flags);
-	list_add_tail(&fde->link, &dwarf_fde_list);
+
+	while (*rb_node) {
+		struct dwarf_fde *fde_tmp;
+		unsigned long tmp_start, tmp_end;
+		unsigned long start, end;
+
+		fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
+
+		start = fde->initial_location;
+		end = fde->initial_location + fde->address_range;
+
+		tmp_start = fde_tmp->initial_location;
+		tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
+
+		parent = *rb_node;
+
+		if (start < tmp_start)
+			rb_node = &parent->rb_left;
+		else if (start >= tmp_end)
+			rb_node = &parent->rb_right;
+		else
+			WARN_ON(1);
+	}
+
+	rb_link_node(&fde->node, parent, rb_node);
+	rb_insert_color(&fde->node, &fde_root);
+
+	if (mod != NULL)
+		list_add_tail(&fde->link, &mod->arch.fde_list);
+
 	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
 
 	return 0;
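
The FDE variant keys the descent on the new range's start address: below an existing range sorts left, at or past its end sorts right, and anything else is an overlap, which should never happen for well-formed frame info and so trips the WARN_ON(1). The three-way comparison in isolation (a stand-alone sketch, names hypothetical):

#include <stdio.h>

/* The comparison used by the FDE insertion above: a new range sorts left
 * of an existing [tmp_start, tmp_end) if it starts below it, right if it
 * starts at or past its end, and anything else overlaps, which the kernel
 * code flags with WARN_ON(1). */
enum fde_order { FDE_LEFT, FDE_RIGHT, FDE_OVERLAP };

static enum fde_order fde_compare(unsigned long start,
				  unsigned long tmp_start,
				  unsigned long tmp_end)
{
	if (start < tmp_start)
		return FDE_LEFT;
	if (start >= tmp_end)
		return FDE_RIGHT;
	return FDE_OVERLAP;
}

int main(void)
{
	printf("%d\n", fde_compare(0x080, 0x100, 0x180));	/* FDE_LEFT */
	printf("%d\n", fde_compare(0x140, 0x100, 0x180));	/* FDE_OVERLAP */
	return 0;
}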
@@ -854,10 +957,8 @@ static void dwarf_unwinder_dump(struct task_struct *task,
 	while (1) {
 		frame = dwarf_unwind_stack(return_addr, _frame);
 
-		if (_frame) {
-			dwarf_frame_free_regs(_frame);
-			mempool_free(_frame, dwarf_frame_pool);
-		}
+		if (_frame)
+			dwarf_free_frame(_frame);
 
 		_frame = frame;
 
@@ -867,6 +968,9 @@ static void dwarf_unwinder_dump(struct task_struct *task,
 		return_addr = frame->return_addr;
 		ops->address(data, return_addr, 1);
 	}
+
+	if (frame)
+		dwarf_free_frame(frame);
 }
 
 static struct unwinder dwarf_unwinder = {
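
The dump loop above fixes the ownership contract: every frame handed back by dwarf_unwind_stack() is eventually released with dwarf_free_frame(), including the final one, and a zero return_addr (as planted when the walk reaches ret_from_irq) terminates it. A stand-alone sketch of that contract with stubbed-out types (everything here is a hypothetical stand-in; only the shape of the loop mirrors the kernel code):

#include <stdio.h>
#include <stdlib.h>

struct frame {
	unsigned long pc, return_addr;
};

static unsigned long fake_chain[] = { 0x8c0010a0, 0x8c002200, 0 };

/* Stub unwinder: allocates a frame the caller must free and replays a
 * canned call chain; a real unwinder would read saved state via prev. */
static struct frame *unwind(unsigned long pc, struct frame *prev)
{
	static int depth;
	struct frame *f = malloc(sizeof(*f));

	(void)prev;
	f->pc = pc;
	f->return_addr = fake_chain[depth++];
	return f;
}

int main(void)
{
	struct frame *frame, *prev = NULL;
	unsigned long pc = 0x8c000000;

	while (1) {
		frame = unwind(pc, prev);
		free(prev);			/* dwarf_free_frame(_frame) */
		prev = frame;
		if (!frame->return_addr)	/* e.g. stopped at ret_from_irq */
			break;
		pc = frame->return_addr;
		printf("caller: 0x%08lx\n", pc);
	}
	free(frame);				/* the final frame, too */
	return 0;
}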
@@ -877,67 +981,57 @@ static struct unwinder dwarf_unwinder = {
 
 static void dwarf_unwinder_cleanup(void)
 {
-	struct dwarf_cie *cie;
-	struct dwarf_fde *fde;
+	struct rb_node **fde_rb_node = &fde_root.rb_node;
+	struct rb_node **cie_rb_node = &cie_root.rb_node;
 
 	/*
 	 * Deallocate all the memory allocated for the DWARF unwinder.
 	 * Traverse all the FDE/CIE lists and remove and free all the
 	 * memory associated with those data structures.
 	 */
-	list_for_each_entry(cie, &dwarf_cie_list, link)
-		kfree(cie);
+	while (*fde_rb_node) {
+		struct dwarf_fde *fde;
 
-	list_for_each_entry(fde, &dwarf_fde_list, link)
+		fde = rb_entry(*fde_rb_node, struct dwarf_fde, node);
+		rb_erase(*fde_rb_node, &fde_root);
 		kfree(fde);
+	}
+
+	while (*cie_rb_node) {
+		struct dwarf_cie *cie;
+
+		cie = rb_entry(*cie_rb_node, struct dwarf_cie, node);
+		rb_erase(*cie_rb_node, &cie_root);
+		kfree(cie);
+	}
 
 	kmem_cache_destroy(dwarf_reg_cachep);
 	kmem_cache_destroy(dwarf_frame_cachep);
 }
 
 /**
- * dwarf_unwinder_init - initialise the dwarf unwinder
+ * dwarf_parse_section - parse DWARF section
+ * @eh_frame_start: start address of the .eh_frame section
+ * @eh_frame_end: end address of the .eh_frame section
+ * @mod: the kernel module containing the .eh_frame section
  *
- * Build the data structures describing the .dwarf_frame section to
- * make it easier to lookup CIE and FDE entries. Because the
- * .eh_frame section is packed as tightly as possible it is not
- * easy to lookup the FDE for a given PC, so we build a list of FDE
- * and CIE entries that make it easier.
+ * Parse the information in a .eh_frame section.
  */
-static int __init dwarf_unwinder_init(void)
+static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
+			       struct module *mod)
 {
 	u32 entry_type;
 	void *p, *entry;
 	int count, err = 0;
-	unsigned long len;
+	unsigned long len = 0;
 	unsigned int c_entries, f_entries;
 	unsigned char *end;
-	INIT_LIST_HEAD(&dwarf_cie_list);
-	INIT_LIST_HEAD(&dwarf_fde_list);
 
 	c_entries = 0;
 	f_entries = 0;
-	entry = &__start_eh_frame;
-
-	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
-			sizeof(struct dwarf_frame), 0,
-			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
-
-	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
-			sizeof(struct dwarf_reg), 0,
-			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+	entry = eh_frame_start;
 
-	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
-					  mempool_alloc_slab,
-					  mempool_free_slab,
-					  dwarf_frame_cachep);
-
-	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
-					mempool_alloc_slab,
-					mempool_free_slab,
-					dwarf_reg_cachep);
-
-	while ((char *)entry < __stop_eh_frame) {
+	while ((char *)entry < eh_frame_end) {
 		p = entry;
 
 		count = dwarf_entry_len(p, &len);
@@ -949,6 +1043,7 @@ static int __init dwarf_unwinder_init(void)
 			 * entry and move to the next one because 'len'
 			 * tells us where our next entry is.
 			 */
+			err = -EINVAL;
 			goto out;
 		} else
 			p += count;
@@ -960,13 +1055,14 @@ static int __init dwarf_unwinder_init(void)
 		p += 4;
 
 		if (entry_type == DW_EH_FRAME_CIE) {
-			err = dwarf_parse_cie(entry, p, len, end);
+			err = dwarf_parse_cie(entry, p, len, end, mod);
 			if (err < 0)
 				goto out;
 			else
 				c_entries++;
 		} else {
-			err = dwarf_parse_fde(entry, entry_type, p, len, end);
+			err = dwarf_parse_fde(entry, entry_type, p, len,
+					      end, mod);
 			if (err < 0)
 				goto out;
 			else
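
For reference, the surrounding parse loop walks .eh_frame as a packed run of length-prefixed records, where the word after the length is zero for a CIE and a CIE back-pointer for an FDE. A stand-alone sketch of that walk (short-form 32-bit lengths only and a little-endian host assumed; dwarf_entry_len() in the kernel also handles the 64-bit extended form):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define DW_EH_FRAME_CIE	0	/* same convention as the kernel code */

/* Sketch of the dwarf_parse_section() walk: each record is a 4-byte
 * length (which excludes the length field itself) followed by a 4-byte
 * CIE id / pointer, then the record body, which is skipped here. */
static void parse_section(const uint8_t *start, const uint8_t *end)
{
	const uint8_t *entry = start;
	unsigned int c_entries = 0, f_entries = 0;

	while (entry < end) {
		uint32_t len, entry_type;

		memcpy(&len, entry, 4);
		memcpy(&entry_type, entry + 4, 4);

		if (entry_type == DW_EH_FRAME_CIE)
			c_entries++;
		else
			f_entries++;

		entry += 4 + len;	/* advance past the whole record */
	}

	printf("read %u CIEs, %u FDEs\n", c_entries, f_entries);
}

int main(void)
{
	/* Two fake records: an 8-byte CIE body and an 8-byte FDE body. */
	uint8_t buf[] = {
		8, 0, 0, 0,  0, 0, 0, 0,  0xde, 0xad, 0xbe, 0xef,
		8, 0, 0, 0,  4, 0, 0, 0,  0xca, 0xfe, 0xba, 0xbe,
	};

	parse_section(buf, buf + sizeof(buf));
	return 0;
}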
@@ -979,6 +1075,117 @@ static int __init dwarf_unwinder_init(void)
 	printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
 	       c_entries, f_entries);
 
+	return 0;
+
+out:
+	return err;
+}
+
+#ifdef CONFIG_MODULES
+int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+			  struct module *me)
+{
+	unsigned int i, err;
+	unsigned long start, end;
+	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	start = end = 0;
+
+	for (i = 1; i < hdr->e_shnum; i++) {
+		/* Alloc bit cleared means "ignore it." */
+		if ((sechdrs[i].sh_flags & SHF_ALLOC)
+		    && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) {
+			start = sechdrs[i].sh_addr;
+			end = start + sechdrs[i].sh_size;
+			break;
+		}
+	}
+
+	/* Did we find the .eh_frame section? */
+	if (i != hdr->e_shnum) {
+		INIT_LIST_HEAD(&me->arch.cie_list);
+		INIT_LIST_HEAD(&me->arch.fde_list);
+		err = dwarf_parse_section((char *)start, (char *)end, me);
+		if (err) {
+			printk(KERN_WARNING "%s: failed to parse DWARF info\n",
+			       me->name);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * module_dwarf_cleanup - remove FDE/CIEs associated with @mod
+ * @mod: the module that is being unloaded
+ *
+ * Remove any FDEs and CIEs from the global lists that came from
+ * @mod's .eh_frame section because @mod is being unloaded.
+ */
+void module_dwarf_cleanup(struct module *mod)
+{
+	struct dwarf_fde *fde, *ftmp;
+	struct dwarf_cie *cie, *ctmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+	list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
+		list_del(&cie->link);
+		rb_erase(&cie->node, &cie_root);
+		kfree(cie);
+	}
+
+	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+	spin_lock_irqsave(&dwarf_fde_lock, flags);
+
+	list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {
+		list_del(&fde->link);
+		rb_erase(&fde->node, &fde_root);
+		kfree(fde);
+	}
+
+	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+}
+#endif /* CONFIG_MODULES */
+
+/**
+ * dwarf_unwinder_init - initialise the dwarf unwinder
+ *
+ * Build the data structures describing the .dwarf_frame section to
+ * make it easier to lookup CIE and FDE entries. Because the
+ * .eh_frame section is packed as tightly as possible it is not
+ * easy to lookup the FDE for a given PC, so we build a list of FDE
+ * and CIE entries that make it easier.
+ */
+static int __init dwarf_unwinder_init(void)
+{
+	int err;
+
+	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
+			sizeof(struct dwarf_frame), 0,
+			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
+			sizeof(struct dwarf_reg), 0,
+			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
+					  mempool_alloc_slab,
+					  mempool_free_slab,
+					  dwarf_frame_cachep);
+
+	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
+					mempool_alloc_slab,
+					mempool_free_slab,
+					dwarf_reg_cachep);
+
+	err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
+	if (err)
+		goto out;
+
 	err = unwinder_register(&dwarf_unwinder);
 	if (err)
 		goto out;
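
Not part of this diff, but for context: the new module_dwarf_finalize()/module_dwarf_cleanup() entry points would presumably be wired into the arch module loader hooks, along these lines. This is a sketch under that assumption; the actual call sites live in a companion change to arch/sh/kernel/module.c and may differ in detail.

/* Sketch (assumption, not shown in this diff): hooking the DWARF module
 * helpers into the standard arch module interface. */
#include <linux/module.h>
#include <asm/dwarf.h>

int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
		    struct module *me)
{
	/* ... any existing arch-specific fixups would run here ... */
	return module_dwarf_finalize(hdr, sechdrs, me);
}

void module_arch_cleanup(struct module *mod)
{
	module_dwarf_cleanup(mod);	/* drop the module's CIEs/FDEs */
}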