-rw-r--r--  arch/sh/include/asm/dwarf.h  |  19
-rw-r--r--  arch/sh/include/asm/module.h |  17
-rw-r--r--  arch/sh/kernel/dwarf.c       | 174
3 files changed, 143 insertions(+), 67 deletions(-)
diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h
index bdccbbfdc0bd..d62abd1d0c05 100644
--- a/arch/sh/include/asm/dwarf.h
+++ b/arch/sh/include/asm/dwarf.h
@@ -243,16 +243,13 @@ struct dwarf_cie {
 
         unsigned long cie_pointer;
 
-        struct list_head link;
-
         unsigned long flags;
 #define DWARF_CIE_Z_AUGMENTATION        (1 << 0)
 
-        /*
-         * 'mod' will be non-NULL if this CIE came from a module's
-         * .eh_frame section.
-         */
-        struct module *mod;
+        /* linked-list entry if this CIE is from a module */
+        struct list_head link;
+
+        struct rb_node node;
 };
 
 /**
@@ -266,13 +263,11 @@ struct dwarf_fde {
         unsigned long address_range;
         unsigned char *instructions;
         unsigned char *end;
+
+        /* linked-list entry if this FDE is from a module */
         struct list_head link;
 
-        /*
-         * 'mod' will be non-NULL if this FDE came from a module's
-         * .eh_frame section.
-         */
-        struct module *mod;
+        struct rb_node node;
 };
 
 /**
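
Not part of the patch: for readers unfamiliar with the kernel rbtree API, a minimal sketch of how the rb_node now embedded in struct dwarf_cie maps back to its container. rb_entry() is just container_of(); rb_first()/rb_next() (not used by this patch) walk the tree in sorted order, and the dump_cies() helper name is hypothetical.

#include <linux/rbtree.h>
#include <linux/printk.h>

/* Illustrative only: walk every CIE in ascending cie_pointer order. */
static void dump_cies(struct rb_root *root)
{
        struct rb_node *n;

        for (n = rb_first(root); n; n = rb_next(n)) {
                /* rb_entry() == container_of(): node address -> enclosing struct */
                struct dwarf_cie *cie = rb_entry(n, struct dwarf_cie, node);

                pr_info("CIE %p, cie_pointer=%#lx\n", cie, cie->cie_pointer);
        }
}
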
diff --git a/arch/sh/include/asm/module.h b/arch/sh/include/asm/module.h
index 068bf1659750..b7927de86f9f 100644
--- a/arch/sh/include/asm/module.h
+++ b/arch/sh/include/asm/module.h
@@ -1,7 +1,22 @@
 #ifndef _ASM_SH_MODULE_H
 #define _ASM_SH_MODULE_H
 
-#include <asm-generic/module.h>
+struct mod_arch_specific {
+#ifdef CONFIG_DWARF_UNWINDER
+        struct list_head fde_list;
+        struct list_head cie_list;
+#endif
+};
+
+#ifdef CONFIG_64BIT
+#define Elf_Shdr Elf64_Shdr
+#define Elf_Sym Elf64_Sym
+#define Elf_Ehdr Elf64_Ehdr
+#else
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Sym Elf32_Sym
+#define Elf_Ehdr Elf32_Ehdr
+#endif
 
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 # ifdef CONFIG_CPU_SH2
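
mod_arch_specific now gives each module its own CIE/FDE lists, so unloading a module only has to touch that module's entries rather than scanning a global list. As a quick reference, summarising the dwarf.c changes below rather than introducing anything new, the intended lifecycle is:

/* module load (module_dwarf_finalize): start with empty per-module lists */
INIT_LIST_HEAD(&me->arch.cie_list);
INIT_LIST_HEAD(&me->arch.fde_list);

/* parse time (dwarf_parse_cie/dwarf_parse_fde): entries from a module go
 * into the global rbtree *and* onto the module's own list */
if (mod != NULL)
        list_add_tail(&cie->link, &mod->arch.cie_list);

/* module unload (module_dwarf_cleanup): drop only this module's entries */
list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
        list_del(&cie->link);
        rb_erase(&cie->node, &cie_root);
        kfree(cie);
}
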
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index e51168064e56..bd1c497280a6 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -39,10 +39,10 @@ static mempool_t *dwarf_frame_pool;
 static struct kmem_cache *dwarf_reg_cachep;
 static mempool_t *dwarf_reg_pool;
 
-static LIST_HEAD(dwarf_cie_list);
+static struct rb_root cie_root;
 static DEFINE_SPINLOCK(dwarf_cie_lock);
 
-static LIST_HEAD(dwarf_fde_list);
+static struct rb_root fde_root;
 static DEFINE_SPINLOCK(dwarf_fde_lock);
 
 static struct dwarf_cie *cached_cie;
@@ -301,7 +301,8 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len)
  */
 static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
 {
-        struct dwarf_cie *cie;
+        struct rb_node **rb_node = &cie_root.rb_node;
+        struct dwarf_cie *cie = NULL;
         unsigned long flags;
 
         spin_lock_irqsave(&dwarf_cie_lock, flags);
@@ -315,16 +316,24 @@ static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
                 goto out;
         }
 
-        list_for_each_entry(cie, &dwarf_cie_list, link) {
-                if (cie->cie_pointer == cie_ptr) {
-                        cached_cie = cie;
-                        break;
+        while (*rb_node) {
+                struct dwarf_cie *cie_tmp;
+
+                cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
+                BUG_ON(!cie_tmp);
+
+                if (cie_ptr == cie_tmp->cie_pointer) {
+                        cie = cie_tmp;
+                        cached_cie = cie_tmp;
+                        goto out;
+                } else {
+                        if (cie_ptr < cie_tmp->cie_pointer)
+                                rb_node = &(*rb_node)->rb_left;
+                        else
+                                rb_node = &(*rb_node)->rb_right;
                 }
         }
 
-        /* Couldn't find the entry in the list. */
-        if (&cie->link == &dwarf_cie_list)
-                cie = NULL;
 out:
         spin_unlock_irqrestore(&dwarf_cie_lock, flags);
         return cie;
@@ -336,25 +345,34 @@ out:
  */
 struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
 {
-        struct dwarf_fde *fde;
+        struct rb_node **rb_node = &fde_root.rb_node;
+        struct dwarf_fde *fde = NULL;
         unsigned long flags;
 
         spin_lock_irqsave(&dwarf_fde_lock, flags);
 
-        list_for_each_entry(fde, &dwarf_fde_list, link) {
-                unsigned long start, end;
+        while (*rb_node) {
+                struct dwarf_fde *fde_tmp;
+                unsigned long tmp_start, tmp_end;
 
-                start = fde->initial_location;
-                end = fde->initial_location + fde->address_range;
+                fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
+                BUG_ON(!fde_tmp);
 
-                if (pc >= start && pc < end)
-                        break;
-        }
+                tmp_start = fde_tmp->initial_location;
+                tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
 
-        /* Couldn't find the entry in the list. */
-        if (&fde->link == &dwarf_fde_list)
-                fde = NULL;
+                if (pc < tmp_start) {
+                        rb_node = &(*rb_node)->rb_left;
+                } else {
+                        if (pc < tmp_end) {
+                                fde = fde_tmp;
+                                goto out;
+                        } else
+                                rb_node = &(*rb_node)->rb_right;
+                }
+        }
 
+out:
         spin_unlock_irqrestore(&dwarf_fde_lock, flags);
 
         return fde;
@@ -552,8 +570,8 @@ extern void ret_from_irq(void);
  * on the callstack. Each of the lower (older) stack frames are
  * linked via the "prev" member.
  */
-struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
-                                         struct dwarf_frame *prev)
+struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
+                                        struct dwarf_frame *prev)
 {
         struct dwarf_frame *frame;
         struct dwarf_cie *cie;
@@ -708,6 +726,8 @@ bail:
 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
                            unsigned char *end, struct module *mod)
 {
+        struct rb_node **rb_node = &cie_root.rb_node;
+        struct rb_node *parent;
         struct dwarf_cie *cie;
         unsigned long flags;
         int count;
@@ -802,11 +822,30 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
         cie->initial_instructions = p;
         cie->instructions_end = end;
 
-        cie->mod = mod;
-
         /* Add to list */
         spin_lock_irqsave(&dwarf_cie_lock, flags);
-        list_add_tail(&cie->link, &dwarf_cie_list);
+
+        while (*rb_node) {
+                struct dwarf_cie *cie_tmp;
+
+                cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
+
+                parent = *rb_node;
+
+                if (cie->cie_pointer < cie_tmp->cie_pointer)
+                        rb_node = &parent->rb_left;
+                else if (cie->cie_pointer >= cie_tmp->cie_pointer)
+                        rb_node = &parent->rb_right;
+                else
+                        WARN_ON(1);
+        }
+
+        rb_link_node(&cie->node, parent, rb_node);
+        rb_insert_color(&cie->node, &cie_root);
+
+        if (mod != NULL)
+                list_add_tail(&cie->link, &mod->arch.cie_list);
+
         spin_unlock_irqrestore(&dwarf_cie_lock, flags);
 
         return 0;
@@ -816,6 +855,8 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
                            void *start, unsigned long len,
                            unsigned char *end, struct module *mod)
 {
+        struct rb_node **rb_node = &fde_root.rb_node;
+        struct rb_node *parent;
         struct dwarf_fde *fde;
         struct dwarf_cie *cie;
         unsigned long flags;
@@ -863,11 +904,38 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
         fde->instructions = p;
         fde->end = end;
 
-        fde->mod = mod;
-
         /* Add to list. */
         spin_lock_irqsave(&dwarf_fde_lock, flags);
-        list_add_tail(&fde->link, &dwarf_fde_list);
+
+        while (*rb_node) {
+                struct dwarf_fde *fde_tmp;
+                unsigned long tmp_start, tmp_end;
+                unsigned long start, end;
+
+                fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
+
+                start = fde->initial_location;
+                end = fde->initial_location + fde->address_range;
+
+                tmp_start = fde_tmp->initial_location;
+                tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
+
+                parent = *rb_node;
+
+                if (start < tmp_start)
+                        rb_node = &parent->rb_left;
+                else if (start >= tmp_end)
+                        rb_node = &parent->rb_right;
+                else
+                        WARN_ON(1);
+        }
+
+        rb_link_node(&fde->node, parent, rb_node);
+        rb_insert_color(&fde->node, &fde_root);
+
+        if (mod != NULL)
+                list_add_tail(&fde->link, &mod->arch.fde_list);
+
         spin_unlock_irqrestore(&dwarf_fde_lock, flags);
 
         return 0;
@@ -912,19 +980,29 @@ static struct unwinder dwarf_unwinder = {
 
 static void dwarf_unwinder_cleanup(void)
 {
-        struct dwarf_cie *cie, *cie_tmp;
-        struct dwarf_fde *fde, *fde_tmp;
+        struct rb_node **fde_rb_node = &fde_root.rb_node;
+        struct rb_node **cie_rb_node = &cie_root.rb_node;
 
         /*
          * Deallocate all the memory allocated for the DWARF unwinder.
          * Traverse all the FDE/CIE lists and remove and free all the
          * memory associated with those data structures.
          */
-        list_for_each_entry_safe(cie, cie_tmp, &dwarf_cie_list, link)
-                kfree(cie);
+        while (*fde_rb_node) {
+                struct dwarf_fde *fde;
 
-        list_for_each_entry_safe(fde, fde_tmp, &dwarf_fde_list, link)
+                fde = rb_entry(*fde_rb_node, struct dwarf_fde, node);
+                rb_erase(*fde_rb_node, &fde_root);
                 kfree(fde);
+        }
+
+        while (*cie_rb_node) {
+                struct dwarf_cie *cie;
+
+                cie = rb_entry(*cie_rb_node, struct dwarf_cie, node);
+                rb_erase(*cie_rb_node, &cie_root);
+                kfree(cie);
+        }
 
         kmem_cache_destroy(dwarf_reg_cachep);
         kmem_cache_destroy(dwarf_frame_cachep);
@@ -1024,6 +1102,8 @@ int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
 
         /* Did we find the .eh_frame section? */
         if (i != hdr->e_shnum) {
+                INIT_LIST_HEAD(&me->arch.cie_list);
+                INIT_LIST_HEAD(&me->arch.fde_list);
                 err = dwarf_parse_section((char *)start, (char *)end, me);
                 if (err) {
                         printk(KERN_WARNING "%s: failed to parse DWARF info\n",
@@ -1044,38 +1124,26 @@ int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
  */
 void module_dwarf_cleanup(struct module *mod)
 {
-        struct dwarf_fde *fde;
-        struct dwarf_cie *cie;
+        struct dwarf_fde *fde, *ftmp;
+        struct dwarf_cie *cie, *ctmp;
         unsigned long flags;
 
         spin_lock_irqsave(&dwarf_cie_lock, flags);
 
-again_cie:
-        list_for_each_entry(cie, &dwarf_cie_list, link) {
-                if (cie->mod == mod)
-                        break;
-        }
-
-        if (&cie->link != &dwarf_cie_list) {
+        list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
                 list_del(&cie->link);
+                rb_erase(&cie->node, &cie_root);
                 kfree(cie);
-                goto again_cie;
         }
 
         spin_unlock_irqrestore(&dwarf_cie_lock, flags);
 
         spin_lock_irqsave(&dwarf_fde_lock, flags);
 
-again_fde:
-        list_for_each_entry(fde, &dwarf_fde_list, link) {
-                if (fde->mod == mod)
-                        break;
-        }
-
-        if (&fde->link != &dwarf_fde_list) {
+        list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {
                 list_del(&fde->link);
+                rb_erase(&fde->node, &fde_root);
                 kfree(fde);
-                goto again_fde;
         }
 
         spin_unlock_irqrestore(&dwarf_fde_lock, flags);
@@ -1094,8 +1162,6 @@ again_fde:
 static int __init dwarf_unwinder_init(void)
 {
         int err;
-        INIT_LIST_HEAD(&dwarf_cie_list);
-        INIT_LIST_HEAD(&dwarf_fde_list);
 
         dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
                                         sizeof(struct dwarf_frame), 0,
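
Not part of the patch: the FDE search descends the tree with the same comparisons the insert path uses — pc below a node's start goes left, pc at or beyond its end goes right, and anything in between is a hit. The standalone userspace sketch below (hypothetical names; it assumes the ranges are non-overlapping, which is what dwarf_parse_fde's WARN_ON(1) branch expects) expresses that descent over a sorted array so the logic can be checked in isolation:

#include <stdio.h>

struct range { unsigned long start, end; };     /* [start, end), like an FDE */

/* Same comparisons as dwarf_lookup_fde(): < start goes left, >= end goes right. */
static const struct range *lookup(const struct range *r, int n, unsigned long pc)
{
        int lo = 0, hi = n - 1;

        while (lo <= hi) {
                int mid = lo + (hi - lo) / 2;

                if (pc < r[mid].start)
                        hi = mid - 1;           /* descend left */
                else if (pc >= r[mid].end)
                        lo = mid + 1;           /* descend right */
                else
                        return &r[mid];         /* start <= pc < end */
        }
        return NULL;                            /* no covering FDE */
}

int main(void)
{
        static const struct range fdes[] = {
                { 0x1000, 0x1080 }, { 0x1080, 0x1200 }, { 0x2000, 0x2040 },
        };
        const struct range *hit = lookup(fdes, 3, 0x10a0);

        if (hit)
                printf("pc 0x10a0 -> [%#lx, %#lx)\n", hit->start, hit->end);
        return 0;
}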