diff options
author | Paul Mundt <lethal@linux-sh.org> | 2009-08-16 16:07:38 -0400 |
---|---|---|
committer | Paul Mundt <lethal@linux-sh.org> | 2009-08-16 16:07:38 -0400 |
commit | 97f361e2498ada54b48a235619eaf5af8e46427e (patch) | |
tree | 20b2802ebd75ef05028e029159db034f1d126cc1 /arch/sh/kernel/dwarf.c | |
parent | cd7246f0e2747bd2b43d25d0f63f05db182a62c0 (diff) |
sh: unwinder: Move initialization to early_initcall() and tidy up locking.
This moves the initialization over to an early_initcall(). This fixes up
some lockdep interaction issues. At the same time, kill off some
superfluous locking in the init path.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/kernel/dwarf.c')
-rw-r--r-- | arch/sh/kernel/dwarf.c | 32 |
1 file changed, 15 insertions(+), 17 deletions(-)
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index 5fd6e604816d..d0652153f576 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c | |||
@@ -26,10 +26,10 @@ | |||
26 | #include <asm/stacktrace.h> | 26 | #include <asm/stacktrace.h> |
27 | 27 | ||
28 | static LIST_HEAD(dwarf_cie_list); | 28 | static LIST_HEAD(dwarf_cie_list); |
29 | DEFINE_SPINLOCK(dwarf_cie_lock); | 29 | static DEFINE_SPINLOCK(dwarf_cie_lock); |
30 | 30 | ||
31 | static LIST_HEAD(dwarf_fde_list); | 31 | static LIST_HEAD(dwarf_fde_list); |
32 | DEFINE_SPINLOCK(dwarf_fde_lock); | 32 | static DEFINE_SPINLOCK(dwarf_fde_lock); |
33 | 33 | ||
34 | static struct dwarf_cie *cached_cie; | 34 | static struct dwarf_cie *cached_cie; |
35 | 35 | ||
@@ -264,7 +264,7 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len) | |||
264 | */ | 264 | */ |
265 | static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr) | 265 | static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr) |
266 | { | 266 | { |
267 | struct dwarf_cie *cie, *n; | 267 | struct dwarf_cie *cie; |
268 | unsigned long flags; | 268 | unsigned long flags; |
269 | 269 | ||
270 | spin_lock_irqsave(&dwarf_cie_lock, flags); | 270 | spin_lock_irqsave(&dwarf_cie_lock, flags); |
@@ -278,7 +278,7 @@ static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr) | |||
278 | goto out; | 278 | goto out; |
279 | } | 279 | } |
280 | 280 | ||
281 | list_for_each_entry_safe(cie, n, &dwarf_cie_list, link) { | 281 | list_for_each_entry(cie, &dwarf_cie_list, link) { |
282 | if (cie->cie_pointer == cie_ptr) { | 282 | if (cie->cie_pointer == cie_ptr) { |
283 | cached_cie = cie; | 283 | cached_cie = cie; |
284 | break; | 284 | break; |
@@ -299,11 +299,12 @@ out: | |||
299 | */ | 299 | */ |
300 | struct dwarf_fde *dwarf_lookup_fde(unsigned long pc) | 300 | struct dwarf_fde *dwarf_lookup_fde(unsigned long pc) |
301 | { | 301 | { |
302 | struct dwarf_fde *fde; | ||
302 | unsigned long flags; | 303 | unsigned long flags; |
303 | struct dwarf_fde *fde, *n; | ||
304 | 304 | ||
305 | spin_lock_irqsave(&dwarf_fde_lock, flags); | 305 | spin_lock_irqsave(&dwarf_fde_lock, flags); |
306 | list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) { | 306 | |
307 | list_for_each_entry(fde, &dwarf_fde_list, link) { | ||
307 | unsigned long start, end; | 308 | unsigned long start, end; |
308 | 309 | ||
309 | start = fde->initial_location; | 310 | start = fde->initial_location; |
@@ -787,24 +788,19 @@ static struct unwinder dwarf_unwinder = { | |||
787 | 788 | ||
788 | static void dwarf_unwinder_cleanup(void) | 789 | static void dwarf_unwinder_cleanup(void) |
789 | { | 790 | { |
790 | struct dwarf_cie *cie, *m; | 791 | struct dwarf_cie *cie; |
791 | struct dwarf_fde *fde, *n; | 792 | struct dwarf_fde *fde; |
792 | unsigned long flags; | ||
793 | 793 | ||
794 | /* | 794 | /* |
795 | * Deallocate all the memory allocated for the DWARF unwinder. | 795 | * Deallocate all the memory allocated for the DWARF unwinder. |
796 | * Traverse all the FDE/CIE lists and remove and free all the | 796 | * Traverse all the FDE/CIE lists and remove and free all the |
797 | * memory associated with those data structures. | 797 | * memory associated with those data structures. |
798 | */ | 798 | */ |
799 | spin_lock_irqsave(&dwarf_cie_lock, flags); | 799 | list_for_each_entry(cie, &dwarf_cie_list, link) |
800 | list_for_each_entry_safe(cie, m, &dwarf_cie_list, link) | ||
801 | kfree(cie); | 800 | kfree(cie); |
802 | spin_unlock_irqrestore(&dwarf_cie_lock, flags); | ||
803 | 801 | ||
804 | spin_lock_irqsave(&dwarf_fde_lock, flags); | 802 | list_for_each_entry(fde, &dwarf_fde_list, link) |
805 | list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) | ||
806 | kfree(fde); | 803 | kfree(fde); |
807 | spin_unlock_irqrestore(&dwarf_fde_lock, flags); | ||
808 | } | 804 | } |
809 | 805 | ||
810 | /** | 806 | /** |
@@ -816,7 +812,7 @@ static void dwarf_unwinder_cleanup(void) | |||
816 | * easy to lookup the FDE for a given PC, so we build a list of FDE | 812 | * easy to lookup the FDE for a given PC, so we build a list of FDE |
817 | * and CIE entries that make it easier. | 813 | * and CIE entries that make it easier. |
818 | */ | 814 | */ |
819 | void dwarf_unwinder_init(void) | 815 | static int __init dwarf_unwinder_init(void) |
820 | { | 816 | { |
821 | u32 entry_type; | 817 | u32 entry_type; |
822 | void *p, *entry; | 818 | void *p, *entry; |
@@ -877,9 +873,11 @@ void dwarf_unwinder_init(void) | |||
877 | if (err) | 873 | if (err) |
878 | goto out; | 874 | goto out; |
879 | 875 | ||
880 | return; | 876 | return 0; |
881 | 877 | ||
882 | out: | 878 | out: |
883 | printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err); | 879 | printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err); |
884 | dwarf_unwinder_cleanup(); | 880 | dwarf_unwinder_cleanup(); |
881 | return -EINVAL; | ||
885 | } | 882 | } |
883 | early_initcall(dwarf_unwinder_init); | ||