author	Paul Mundt <lethal@linux-sh.org>	2009-08-13 13:10:59 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2009-08-13 13:10:59 -0400
commit	3497447f15485b479366ec86effaac16fc82411b (patch)
tree	52369aa1441d5a4ff4fea1d175c96d63396e7224 /arch/sh/kernel/dwarf.c
parent	cafb0ddac60556f7d2d4cd0ef1a93da8a6c71ffb (diff)
sh: unwinder: Fix up usage of unaligned accessors.
This was using internal symbols for unaligned accesses, bypassing the
exposed interface for variable-sized safe accesses. This converts all of
the __get_unaligned_cpuXX() users over to get_unaligned() directly,
relying on the cast to select the proper internal routine.
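As a quick illustration of the pattern (a sketch, not lifted verbatim from the patch): get_unaligned() takes its access width from the pointed-to type, so a cast at the call site stands in for the explicit __get_unaligned_cpu16/32/64() variants:

	#include <asm/unaligned.h>

	/* p may be misaligned; the cast selects the access width */
	u16 v16 = get_unaligned((u16 *)p);	/* was __get_unaligned_cpu16(p) */
	u32 v32 = get_unaligned((u32 *)p);	/* was __get_unaligned_cpu32(p) */
	u64 v64 = get_unaligned((u64 *)p);	/* was __get_unaligned_cpu64(p) */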
Additionally, the __put_unaligned_cpuXX() case is superfluous given that
the destination address is aligned in all of the current cases, so just
drop that outright.
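Concretely, in dwarf_read_addr() the destination is an aligned unsigned long, so the unaligned read/write pair collapses into a plain store (see the second hunk below):

	/* before: unaligned read, then unaligned store into dst */
	u32 val = __get_unaligned_cpu32(src);
	__put_unaligned_cpu32(val, dst);

	/* after: dst is aligned, so a direct assignment is enough */
	*dst = get_unaligned(src);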
Furthermore, this switches to the asm/unaligned.h header instead of the
asm-generic version, which was silently bypassing the SH-4A optimized
unaligned ops.
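For context, a rough sketch of why the header choice matters (the arch header's contents are paraphrased here and are not part of this patch): <asm/unaligned.h> is where the architecture routes unaligned accesses to the SH-4A-optimized routines, so including <asm-generic/unaligned.h> directly sidesteps that selection:

	/* arch/sh/include/asm/unaligned.h -- shape only, for illustration */
	#ifdef CONFIG_CPU_SH4A
	# include <asm/unaligned-sh4a.h>	/* SH-4A can do unaligned loads cheaply */
	#else
	# include <asm-generic/unaligned.h>	/* byte-wise fallback */
	#endif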
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/kernel/dwarf.c')
-rw-r--r--	arch/sh/kernel/dwarf.c	20
1 file changed, 9 insertions, 11 deletions
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 09c6fd7fd05f..d1d8536e5ba3 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -21,7 +21,7 @@
 #include <asm/dwarf.h>
 #include <asm/unwinder.h>
 #include <asm/sections.h>
-#include <asm-generic/unaligned.h>
+#include <asm/unaligned.h>
 #include <asm/dwarf.h>
 #include <asm/stacktrace.h>
 
@@ -87,11 +87,9 @@ static void dwarf_frame_alloc_regs(struct dwarf_frame *frame,
  * from @src and writing to @dst, because they can be arbitrarily
  * aligned. Return 'n' - the number of bytes read.
  */
-static inline int dwarf_read_addr(void *src, void *dst)
+static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
 {
-	u32 val = __get_unaligned_cpu32(src);
-	__put_unaligned_cpu32(val, dst);
-
+	*dst = get_unaligned(src);
 	return sizeof(unsigned long *);
 }
 
@@ -207,7 +205,7 @@ static int dwarf_read_encoded_value(char *addr, unsigned long *val,
 	case DW_EH_PE_sdata4:
 	case DW_EH_PE_udata4:
 		count += 4;
-		decoded_addr += __get_unaligned_cpu32(addr);
+		decoded_addr += get_unaligned((u32 *)addr);
 		__raw_writel(decoded_addr, val);
 		break;
 	default:
@@ -232,7 +230,7 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len)
 	u32 initial_len;
 	int count;
 
-	initial_len = __get_unaligned_cpu32(addr);
+	initial_len = get_unaligned((u32 *)addr);
 	count = 4;
 
 	/*
@@ -247,7 +245,7 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len)
 	 * compulsory 32-bit length field.
 	 */
 	if (initial_len == DW_EXT_DWARF64) {
-		*len = __get_unaligned_cpu64(addr + 4);
+		*len = get_unaligned((u64 *)addr + 4);
 		count = 12;
 	} else {
 		printk(KERN_WARNING "Unknown DWARF extension\n");
@@ -392,12 +390,12 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 		frame->pc += delta * cie->code_alignment_factor;
 		break;
 	case DW_CFA_advance_loc2:
-		delta = __get_unaligned_cpu16(current_insn);
+		delta = get_unaligned((u16 *)current_insn);
 		current_insn += 2;
 		frame->pc += delta * cie->code_alignment_factor;
 		break;
 	case DW_CFA_advance_loc4:
-		delta = __get_unaligned_cpu32(current_insn);
+		delta = get_unaligned((u32 *)current_insn);
 		current_insn += 4;
 		frame->pc += delta * cie->code_alignment_factor;
 		break;
@@ -841,7 +839,7 @@ void dwarf_unwinder_init(void)
 		/* initial length does not include itself */
 		end = p + len;
 
-		entry_type = __get_unaligned_cpu32(p);
+		entry_type = get_unaligned((u32 *)p);
 		p += 4;
 
 		if (entry_type == DW_EH_FRAME_CIE) {