diff options
Diffstat (limited to 'arch/sh/kernel/dwarf.c')
-rw-r--r-- | arch/sh/kernel/dwarf.c | 902 |
1 files changed, 902 insertions, 0 deletions
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c new file mode 100644 index 000000000000..c6c5764a8ab1 --- /dev/null +++ b/arch/sh/kernel/dwarf.c | |||
@@ -0,0 +1,902 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org> | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * This is an implementation of a DWARF unwinder. Its main purpose is | ||
9 | * for generating stacktrace information. Based on the DWARF 3 | ||
10 | * specification from http://www.dwarfstd.org. | ||
11 | * | ||
12 | * TODO: | ||
13 | * - DWARF64 doesn't work. | ||
14 | */ | ||
15 | |||
16 | /* #define DEBUG */ | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/io.h> | ||
19 | #include <linux/list.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <asm/dwarf.h> | ||
22 | #include <asm/unwinder.h> | ||
23 | #include <asm/sections.h> | ||
24 | #include <asm/unaligned.h> | ||
25 | #include <asm/dwarf.h> | ||
26 | #include <asm/stacktrace.h> | ||
27 | |||
28 | static LIST_HEAD(dwarf_cie_list); | ||
29 | DEFINE_SPINLOCK(dwarf_cie_lock); | ||
30 | |||
31 | static LIST_HEAD(dwarf_fde_list); | ||
32 | DEFINE_SPINLOCK(dwarf_fde_lock); | ||
33 | |||
34 | static struct dwarf_cie *cached_cie; | ||
35 | |||
36 | /* | ||
37 | * Figure out whether we need to allocate some dwarf registers. If dwarf | ||
38 | * registers have already been allocated then we may need to realloc | ||
39 | * them. "reg" is a register number that we need to be able to access | ||
40 | * after this call. | ||
41 | * | ||
42 | * Register numbers start at zero, therefore we need to allocate space | ||
43 | * for "reg" + 1 registers. | ||
44 | */ | ||
45 | static void dwarf_frame_alloc_regs(struct dwarf_frame *frame, | ||
46 | unsigned int reg) | ||
47 | { | ||
48 | struct dwarf_reg *regs; | ||
49 | unsigned int num_regs = reg + 1; | ||
50 | size_t new_size; | ||
51 | size_t old_size; | ||
52 | |||
53 | new_size = num_regs * sizeof(*regs); | ||
54 | old_size = frame->num_regs * sizeof(*regs); | ||
55 | |||
56 | /* Fast path: don't allocate any regs if we've already got enough. */ | ||
57 | if (frame->num_regs >= num_regs) | ||
58 | return; | ||
59 | |||
60 | regs = kzalloc(new_size, GFP_ATOMIC); | ||
61 | if (!regs) { | ||
62 | printk(KERN_WARNING "Unable to allocate DWARF registers\n"); | ||
63 | /* | ||
64 | * Let's just bomb hard here, we have no way to | ||
65 | * gracefully recover. | ||
66 | */ | ||
67 | BUG(); | ||
68 | } | ||
69 | |||
70 | if (frame->regs) { | ||
71 | memcpy(regs, frame->regs, old_size); | ||
72 | kfree(frame->regs); | ||
73 | } | ||
74 | |||
75 | frame->regs = regs; | ||
76 | frame->num_regs = num_regs; | ||
77 | } | ||
78 | |||
/**
 * dwarf_read_addr - read dwarf data
 * @src: source address of data
 * @dst: destination address to store the data to
 *
 * Read 'n' bytes from @src, where 'n' is the size of an address on
 * the native machine. We have to be careful when reading from @src
 * and writing to @dst, because they can be arbitrarily aligned.
 * Return 'n' - the number of bytes read.
 */
static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
{
	/*
	 * BUG fix: use a native-word read/write rather than a fixed
	 * u32, so that the number of bytes copied always matches the
	 * return value (the old code copied 4 bytes but returned
	 * sizeof(unsigned long *), which would diverge on a 64-bit
	 * build).
	 */
	unsigned long val = get_unaligned(src);

	put_unaligned(val, dst);
	return sizeof(unsigned long);
}
96 | |||
/**
 * dwarf_read_uleb128 - read unsigned LEB128 data
 * @addr: the address where the ULEB128 data is stored
 * @ret: address to store the result
 *
 * Decode an unsigned LEB128 encoded datum. The algorithm is taken
 * from Appendix C of the DWARF 3 spec. For information on the
 * encodings refer to section "7.6 - Variable Length Data". Return
 * the number of bytes read.
 */
static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
{
	unsigned int result;
	unsigned char byte;
	int shift, count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		count++;

		/*
		 * BUG fix: on malformed input the continuation bit can
		 * keep us looping until shift >= the width of 'result',
		 * and shifting by that much is undefined behaviour.
		 * Discard payload bits that no longer fit; we still
		 * consume bytes so the caller's cursor stays in sync.
		 */
		if (shift < (int)(8 * sizeof(result)))
			result |= (byte & 0x7f) << shift;
		shift += 7;

		if (!(byte & 0x80))
			break;
	}

	*ret = result;

	return count;
}
133 | |||
/**
 * dwarf_read_leb128 - read signed LEB128 data
 * @addr: the address of the LEB128 encoded data
 * @ret: address to store the result
 *
 * Decode signed LEB128 data. The algorithm is taken from Appendix
 * C of the DWARF 3 spec. Return the number of bytes read.
 */
static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
{
	unsigned char byte;
	int result, shift;
	int num_bits;
	int count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		result |= (byte & 0x7f) << shift;
		shift += 7;
		count++;

		if (!(byte & 0x80))
			break;
	}

	/* The number of bits in a signed integer. */
	num_bits = 8 * sizeof(result);

	/*
	 * Sign-extend when the value's sign bit (0x40 of the final
	 * byte) is set. BUG fix: "-1 << shift" left-shifts a negative
	 * value, which is undefined behaviour; build the mask in
	 * unsigned arithmetic instead.
	 */
	if ((shift < num_bits) && (byte & 0x40))
		result |= (int)(~0U << shift);

	*ret = result;

	return count;
}
174 | |||
/**
 * dwarf_read_encoded_value - return the decoded value at @addr
 * @addr: the address of the encoded value
 * @val: where to write the decoded value
 * @encoding: the encoding with which we can decode @addr
 *
 * GCC emits encoded address in the .eh_frame FDE entries. Decode
 * the value at @addr using @encoding. The decoded value is written
 * to @val and the number of bytes read is returned.
 *
 * Only DW_EH_PE_absptr and DW_EH_PE_pcrel application modes and
 * 4-byte data formats are handled; anything else hits BUG().
 */
static int dwarf_read_encoded_value(char *addr, unsigned long *val,
				    char encoding)
{
	unsigned long decoded_addr = 0;
	int count = 0;

	/* Upper nibble: how the value is applied (absolute vs pc-relative). */
	switch (encoding & 0x70) {
	case DW_EH_PE_absptr:
		break;
	case DW_EH_PE_pcrel:
		/* pc-relative: bias the decoded datum by its own address. */
		decoded_addr = (unsigned long)addr;
		break;
	default:
		pr_debug("encoding=0x%x\n", (encoding & 0x70));
		BUG();
	}

	/* An all-zero format nibble means "natural size"; treat as udata4. */
	if ((encoding & 0x07) == 0x00)
		encoding |= DW_EH_PE_udata4;

	switch (encoding & 0x0f) {
	case DW_EH_PE_sdata4:
	case DW_EH_PE_udata4:
		/*
		 * NOTE(review): sdata4 is read as an unsigned u32 here.
		 * On 32-bit the wrap-around addition gives the same
		 * result as proper sign extension, but this looks
		 * wrong for any wider build — confirm before reuse.
		 */
		count += 4;
		decoded_addr += get_unaligned((u32 *)addr);
		__raw_writel(decoded_addr, val);
		break;
	default:
		pr_debug("encoding=0x%x\n", encoding);
		BUG();
	}

	return count;
}
219 | |||
220 | /** | ||
221 | * dwarf_entry_len - return the length of an FDE or CIE | ||
222 | * @addr: the address of the entry | ||
223 | * @len: the length of the entry | ||
224 | * | ||
225 | * Read the initial_length field of the entry and store the size of | ||
226 | * the entry in @len. We return the number of bytes read. Return a | ||
227 | * count of 0 on error. | ||
228 | */ | ||
229 | static inline int dwarf_entry_len(char *addr, unsigned long *len) | ||
230 | { | ||
231 | u32 initial_len; | ||
232 | int count; | ||
233 | |||
234 | initial_len = get_unaligned((u32 *)addr); | ||
235 | count = 4; | ||
236 | |||
237 | /* | ||
238 | * An initial length field value in the range DW_LEN_EXT_LO - | ||
239 | * DW_LEN_EXT_HI indicates an extension, and should not be | ||
240 | * interpreted as a length. The only extension that we currently | ||
241 | * understand is the use of DWARF64 addresses. | ||
242 | */ | ||
243 | if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) { | ||
244 | /* | ||
245 | * The 64-bit length field immediately follows the | ||
246 | * compulsory 32-bit length field. | ||
247 | */ | ||
248 | if (initial_len == DW_EXT_DWARF64) { | ||
249 | *len = get_unaligned((u64 *)addr + 4); | ||
250 | count = 12; | ||
251 | } else { | ||
252 | printk(KERN_WARNING "Unknown DWARF extension\n"); | ||
253 | count = 0; | ||
254 | } | ||
255 | } else | ||
256 | *len = initial_len; | ||
257 | |||
258 | return count; | ||
259 | } | ||
260 | |||
261 | /** | ||
262 | * dwarf_lookup_cie - locate the cie | ||
263 | * @cie_ptr: pointer to help with lookup | ||
264 | */ | ||
265 | static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr) | ||
266 | { | ||
267 | struct dwarf_cie *cie, *n; | ||
268 | unsigned long flags; | ||
269 | |||
270 | spin_lock_irqsave(&dwarf_cie_lock, flags); | ||
271 | |||
272 | /* | ||
273 | * We've cached the last CIE we looked up because chances are | ||
274 | * that the FDE wants this CIE. | ||
275 | */ | ||
276 | if (cached_cie && cached_cie->cie_pointer == cie_ptr) { | ||
277 | cie = cached_cie; | ||
278 | goto out; | ||
279 | } | ||
280 | |||
281 | list_for_each_entry_safe(cie, n, &dwarf_cie_list, link) { | ||
282 | if (cie->cie_pointer == cie_ptr) { | ||
283 | cached_cie = cie; | ||
284 | break; | ||
285 | } | ||
286 | } | ||
287 | |||
288 | /* Couldn't find the entry in the list. */ | ||
289 | if (&cie->link == &dwarf_cie_list) | ||
290 | cie = NULL; | ||
291 | out: | ||
292 | spin_unlock_irqrestore(&dwarf_cie_lock, flags); | ||
293 | return cie; | ||
294 | } | ||
295 | |||
296 | /** | ||
297 | * dwarf_lookup_fde - locate the FDE that covers pc | ||
298 | * @pc: the program counter | ||
299 | */ | ||
300 | struct dwarf_fde *dwarf_lookup_fde(unsigned long pc) | ||
301 | { | ||
302 | unsigned long flags; | ||
303 | struct dwarf_fde *fde, *n; | ||
304 | |||
305 | spin_lock_irqsave(&dwarf_fde_lock, flags); | ||
306 | list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) { | ||
307 | unsigned long start, end; | ||
308 | |||
309 | start = fde->initial_location; | ||
310 | end = fde->initial_location + fde->address_range; | ||
311 | |||
312 | if (pc >= start && pc < end) | ||
313 | break; | ||
314 | } | ||
315 | |||
316 | /* Couldn't find the entry in the list. */ | ||
317 | if (&fde->link == &dwarf_fde_list) | ||
318 | fde = NULL; | ||
319 | |||
320 | spin_unlock_irqrestore(&dwarf_fde_lock, flags); | ||
321 | |||
322 | return fde; | ||
323 | } | ||
324 | |||
325 | /** | ||
326 | * dwarf_cfa_execute_insns - execute instructions to calculate a CFA | ||
327 | * @insn_start: address of the first instruction | ||
328 | * @insn_end: address of the last instruction | ||
329 | * @cie: the CIE for this function | ||
330 | * @fde: the FDE for this function | ||
331 | * @frame: the instructions calculate the CFA for this frame | ||
332 | * @pc: the program counter of the address we're interested in | ||
333 | * @define_ra: keep executing insns until the return addr reg is defined? | ||
334 | * | ||
335 | * Execute the Call Frame instruction sequence starting at | ||
336 | * @insn_start and ending at @insn_end. The instructions describe | ||
337 | * how to calculate the Canonical Frame Address of a stackframe. | ||
338 | * Store the results in @frame. | ||
339 | */ | ||
340 | static int dwarf_cfa_execute_insns(unsigned char *insn_start, | ||
341 | unsigned char *insn_end, | ||
342 | struct dwarf_cie *cie, | ||
343 | struct dwarf_fde *fde, | ||
344 | struct dwarf_frame *frame, | ||
345 | unsigned long pc, | ||
346 | bool define_ra) | ||
347 | { | ||
348 | unsigned char insn; | ||
349 | unsigned char *current_insn; | ||
350 | unsigned int count, delta, reg, expr_len, offset; | ||
351 | bool seen_ra_reg; | ||
352 | |||
353 | current_insn = insn_start; | ||
354 | |||
355 | /* | ||
356 | * If we're executing instructions for the dwarf_unwind_stack() | ||
357 | * FDE we need to keep executing instructions until the value of | ||
358 | * DWARF_ARCH_RA_REG is defined. See the comment in | ||
359 | * dwarf_unwind_stack() for more details. | ||
360 | */ | ||
361 | if (define_ra) | ||
362 | seen_ra_reg = false; | ||
363 | else | ||
364 | seen_ra_reg = true; | ||
365 | |||
366 | while (current_insn < insn_end && (frame->pc <= pc || !seen_ra_reg) ) { | ||
367 | insn = __raw_readb(current_insn++); | ||
368 | |||
369 | if (!seen_ra_reg) { | ||
370 | if (frame->num_regs >= DWARF_ARCH_RA_REG && | ||
371 | frame->regs[DWARF_ARCH_RA_REG].flags) | ||
372 | seen_ra_reg = true; | ||
373 | } | ||
374 | |||
375 | /* | ||
376 | * Firstly, handle the opcodes that embed their operands | ||
377 | * in the instructions. | ||
378 | */ | ||
379 | switch (DW_CFA_opcode(insn)) { | ||
380 | case DW_CFA_advance_loc: | ||
381 | delta = DW_CFA_operand(insn); | ||
382 | delta *= cie->code_alignment_factor; | ||
383 | frame->pc += delta; | ||
384 | continue; | ||
385 | /* NOTREACHED */ | ||
386 | case DW_CFA_offset: | ||
387 | reg = DW_CFA_operand(insn); | ||
388 | count = dwarf_read_uleb128(current_insn, &offset); | ||
389 | current_insn += count; | ||
390 | offset *= cie->data_alignment_factor; | ||
391 | dwarf_frame_alloc_regs(frame, reg); | ||
392 | frame->regs[reg].addr = offset; | ||
393 | frame->regs[reg].flags |= DWARF_REG_OFFSET; | ||
394 | continue; | ||
395 | /* NOTREACHED */ | ||
396 | case DW_CFA_restore: | ||
397 | reg = DW_CFA_operand(insn); | ||
398 | continue; | ||
399 | /* NOTREACHED */ | ||
400 | } | ||
401 | |||
402 | /* | ||
403 | * Secondly, handle the opcodes that don't embed their | ||
404 | * operands in the instruction. | ||
405 | */ | ||
406 | switch (insn) { | ||
407 | case DW_CFA_nop: | ||
408 | continue; | ||
409 | case DW_CFA_advance_loc1: | ||
410 | delta = *current_insn++; | ||
411 | frame->pc += delta * cie->code_alignment_factor; | ||
412 | break; | ||
413 | case DW_CFA_advance_loc2: | ||
414 | delta = get_unaligned((u16 *)current_insn); | ||
415 | current_insn += 2; | ||
416 | frame->pc += delta * cie->code_alignment_factor; | ||
417 | break; | ||
418 | case DW_CFA_advance_loc4: | ||
419 | delta = get_unaligned((u32 *)current_insn); | ||
420 | current_insn += 4; | ||
421 | frame->pc += delta * cie->code_alignment_factor; | ||
422 | break; | ||
423 | case DW_CFA_offset_extended: | ||
424 | count = dwarf_read_uleb128(current_insn, ®); | ||
425 | current_insn += count; | ||
426 | count = dwarf_read_uleb128(current_insn, &offset); | ||
427 | current_insn += count; | ||
428 | offset *= cie->data_alignment_factor; | ||
429 | break; | ||
430 | case DW_CFA_restore_extended: | ||
431 | count = dwarf_read_uleb128(current_insn, ®); | ||
432 | current_insn += count; | ||
433 | break; | ||
434 | case DW_CFA_undefined: | ||
435 | count = dwarf_read_uleb128(current_insn, ®); | ||
436 | current_insn += count; | ||
437 | break; | ||
438 | case DW_CFA_def_cfa: | ||
439 | count = dwarf_read_uleb128(current_insn, | ||
440 | &frame->cfa_register); | ||
441 | current_insn += count; | ||
442 | count = dwarf_read_uleb128(current_insn, | ||
443 | &frame->cfa_offset); | ||
444 | current_insn += count; | ||
445 | |||
446 | frame->flags |= DWARF_FRAME_CFA_REG_OFFSET; | ||
447 | break; | ||
448 | case DW_CFA_def_cfa_register: | ||
449 | count = dwarf_read_uleb128(current_insn, | ||
450 | &frame->cfa_register); | ||
451 | current_insn += count; | ||
452 | frame->cfa_offset = 0; | ||
453 | frame->flags |= DWARF_FRAME_CFA_REG_OFFSET; | ||
454 | break; | ||
455 | case DW_CFA_def_cfa_offset: | ||
456 | count = dwarf_read_uleb128(current_insn, &offset); | ||
457 | current_insn += count; | ||
458 | frame->cfa_offset = offset; | ||
459 | break; | ||
460 | case DW_CFA_def_cfa_expression: | ||
461 | count = dwarf_read_uleb128(current_insn, &expr_len); | ||
462 | current_insn += count; | ||
463 | |||
464 | frame->cfa_expr = current_insn; | ||
465 | frame->cfa_expr_len = expr_len; | ||
466 | current_insn += expr_len; | ||
467 | |||
468 | frame->flags |= DWARF_FRAME_CFA_REG_EXP; | ||
469 | break; | ||
470 | case DW_CFA_offset_extended_sf: | ||
471 | count = dwarf_read_uleb128(current_insn, ®); | ||
472 | current_insn += count; | ||
473 | count = dwarf_read_leb128(current_insn, &offset); | ||
474 | current_insn += count; | ||
475 | offset *= cie->data_alignment_factor; | ||
476 | dwarf_frame_alloc_regs(frame, reg); | ||
477 | frame->regs[reg].flags |= DWARF_REG_OFFSET; | ||
478 | frame->regs[reg].addr = offset; | ||
479 | break; | ||
480 | case DW_CFA_val_offset: | ||
481 | count = dwarf_read_uleb128(current_insn, ®); | ||
482 | current_insn += count; | ||
483 | count = dwarf_read_leb128(current_insn, &offset); | ||
484 | offset *= cie->data_alignment_factor; | ||
485 | frame->regs[reg].flags |= DWARF_REG_OFFSET; | ||
486 | frame->regs[reg].addr = offset; | ||
487 | break; | ||
488 | default: | ||
489 | pr_debug("unhandled DWARF instruction 0x%x\n", insn); | ||
490 | break; | ||
491 | } | ||
492 | } | ||
493 | |||
494 | return 0; | ||
495 | } | ||
496 | |||
497 | /** | ||
498 | * dwarf_unwind_stack - recursively unwind the stack | ||
499 | * @pc: address of the function to unwind | ||
500 | * @prev: struct dwarf_frame of the previous stackframe on the callstack | ||
501 | * | ||
502 | * Return a struct dwarf_frame representing the most recent frame | ||
503 | * on the callstack. Each of the lower (older) stack frames are | ||
504 | * linked via the "prev" member. | ||
505 | */ | ||
506 | struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, | ||
507 | struct dwarf_frame *prev) | ||
508 | { | ||
509 | struct dwarf_frame *frame; | ||
510 | struct dwarf_cie *cie; | ||
511 | struct dwarf_fde *fde; | ||
512 | unsigned long addr; | ||
513 | int i, offset; | ||
514 | bool define_ra = false; | ||
515 | |||
516 | /* | ||
517 | * If this is the first invocation of this recursive function we | ||
518 | * need get the contents of a physical register to get the CFA | ||
519 | * in order to begin the virtual unwinding of the stack. | ||
520 | * | ||
521 | * Setting "define_ra" to true indictates that we want | ||
522 | * dwarf_cfa_execute_insns() to continue executing instructions | ||
523 | * until we know how to calculate the value of DWARF_ARCH_RA_REG | ||
524 | * (which we need in order to kick off the whole unwinding | ||
525 | * process). | ||
526 | * | ||
527 | * NOTE: the return address is guaranteed to be setup by the | ||
528 | * time this function makes its first function call. | ||
529 | */ | ||
530 | if (!pc && !prev) { | ||
531 | pc = (unsigned long)&dwarf_unwind_stack; | ||
532 | define_ra = true; | ||
533 | } | ||
534 | |||
535 | frame = kzalloc(sizeof(*frame), GFP_ATOMIC); | ||
536 | if (!frame) | ||
537 | return NULL; | ||
538 | |||
539 | frame->prev = prev; | ||
540 | |||
541 | fde = dwarf_lookup_fde(pc); | ||
542 | if (!fde) { | ||
543 | /* | ||
544 | * This is our normal exit path - the one that stops the | ||
545 | * recursion. There's two reasons why we might exit | ||
546 | * here, | ||
547 | * | ||
548 | * a) pc has no asscociated DWARF frame info and so | ||
549 | * we don't know how to unwind this frame. This is | ||
550 | * usually the case when we're trying to unwind a | ||
551 | * frame that was called from some assembly code | ||
552 | * that has no DWARF info, e.g. syscalls. | ||
553 | * | ||
554 | * b) the DEBUG info for pc is bogus. There's | ||
555 | * really no way to distinguish this case from the | ||
556 | * case above, which sucks because we could print a | ||
557 | * warning here. | ||
558 | */ | ||
559 | return NULL; | ||
560 | } | ||
561 | |||
562 | cie = dwarf_lookup_cie(fde->cie_pointer); | ||
563 | |||
564 | frame->pc = fde->initial_location; | ||
565 | |||
566 | /* CIE initial instructions */ | ||
567 | dwarf_cfa_execute_insns(cie->initial_instructions, | ||
568 | cie->instructions_end, cie, fde, | ||
569 | frame, pc, false); | ||
570 | |||
571 | /* FDE instructions */ | ||
572 | dwarf_cfa_execute_insns(fde->instructions, fde->end, cie, | ||
573 | fde, frame, pc, define_ra); | ||
574 | |||
575 | /* Calculate the CFA */ | ||
576 | switch (frame->flags) { | ||
577 | case DWARF_FRAME_CFA_REG_OFFSET: | ||
578 | if (prev) { | ||
579 | BUG_ON(!prev->regs[frame->cfa_register].flags); | ||
580 | |||
581 | addr = prev->cfa; | ||
582 | addr += prev->regs[frame->cfa_register].addr; | ||
583 | frame->cfa = __raw_readl(addr); | ||
584 | |||
585 | } else { | ||
586 | /* | ||
587 | * Again, this is the first invocation of this | ||
588 | * recurisve function. We need to physically | ||
589 | * read the contents of a register in order to | ||
590 | * get the Canonical Frame Address for this | ||
591 | * function. | ||
592 | */ | ||
593 | frame->cfa = dwarf_read_arch_reg(frame->cfa_register); | ||
594 | } | ||
595 | |||
596 | frame->cfa += frame->cfa_offset; | ||
597 | break; | ||
598 | default: | ||
599 | BUG(); | ||
600 | } | ||
601 | |||
602 | /* If we haven't seen the return address reg, we're screwed. */ | ||
603 | BUG_ON(!frame->regs[DWARF_ARCH_RA_REG].flags); | ||
604 | |||
605 | for (i = 0; i <= frame->num_regs; i++) { | ||
606 | struct dwarf_reg *reg = &frame->regs[i]; | ||
607 | |||
608 | if (!reg->flags) | ||
609 | continue; | ||
610 | |||
611 | offset = reg->addr; | ||
612 | offset += frame->cfa; | ||
613 | } | ||
614 | |||
615 | addr = frame->cfa + frame->regs[DWARF_ARCH_RA_REG].addr; | ||
616 | frame->return_addr = __raw_readl(addr); | ||
617 | |||
618 | frame->next = dwarf_unwind_stack(frame->return_addr, frame); | ||
619 | return frame; | ||
620 | } | ||
621 | |||
/*
 * dwarf_parse_cie - parse one Common Information Entry from .eh_frame
 * @entry: address of the start of this CIE (including the length field)
 * @p: address of the CIE body (just past the CIE id field)
 * @len: entry length as read from the initial length field
 * @end: one past the last byte of this entry
 *
 * Builds a struct dwarf_cie from the raw entry and appends it to
 * dwarf_cie_list. Only version 1 CIEs and the 'z', 'L' and 'R'
 * augmentations are handled; 'P' and 'S' hit BUG(). Returns 0 on
 * success or -ENOMEM if the CIE could not be allocated.
 */
static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
			   unsigned char *end)
{
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;

	cie = kzalloc(sizeof(*cie), GFP_KERNEL);
	if (!cie)
		return -ENOMEM;

	cie->length = len;

	/*
	 * Record the offset into the .eh_frame section
	 * for this CIE. It allows this CIE to be
	 * quickly and easily looked up from the
	 * corresponding FDE.
	 */
	cie->cie_pointer = (unsigned long)entry;

	cie->version = *(char *)p++;
	BUG_ON(cie->version != 1);

	/* NUL-terminated augmentation string immediately follows. */
	cie->augmentation = p;
	p += strlen(cie->augmentation) + 1;

	count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
	p += count;

	count = dwarf_read_leb128(p, &cie->data_alignment_factor);
	p += count;

	/*
	 * Which column in the rule table contains the
	 * return address?
	 */
	if (cie->version == 1) {
		cie->return_address_reg = __raw_readb(p);
		p++;
	} else {
		count = dwarf_read_uleb128(p, &cie->return_address_reg);
		p += count;
	}

	if (cie->augmentation[0] == 'z') {
		/* NOTE(review): 'count' here shadows the outer 'count'. */
		unsigned int length, count;
		cie->flags |= DWARF_CIE_Z_AUGMENTATION;

		/* 'z': a ULEB128 gives the augmentation data length. */
		count = dwarf_read_uleb128(p, &length);
		p += count;

		BUG_ON((unsigned char *)p > end);

		/*
		 * Provisional value only: it is overwritten at the
		 * bottom of this function unless the unknown-
		 * augmentation fallback below needs it to skip the
		 * remaining augmentation data.
		 */
		cie->initial_instructions = p + length;
		cie->augmentation++;
	}

	while (*cie->augmentation) {
		/*
		 * "L" indicates a byte showing how the
		 * LSDA pointer is encoded. Skip it.
		 */
		if (*cie->augmentation == 'L') {
			p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'R') {
			/*
			 * "R" indicates a byte showing
			 * how FDE addresses are
			 * encoded.
			 */
			cie->encoding = *(char *)p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'P') {
			/*
			 * "P" indicates a personality
			 * routine in the CIE
			 * augmentation.
			 */
			BUG();
		} else if (*cie->augmentation == 'S') {
			BUG();
		} else {
			/*
			 * Unknown augmentation. Assume
			 * 'z' augmentation.
			 */
			p = cie->initial_instructions;
			BUG_ON(!p);
			break;
		}
	}

	cie->initial_instructions = p;
	cie->instructions_end = end;

	/* Add to list */
	spin_lock_irqsave(&dwarf_cie_lock, flags);
	list_add_tail(&cie->link, &dwarf_cie_list);
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);

	return 0;
}
726 | |||
727 | static int dwarf_parse_fde(void *entry, u32 entry_type, | ||
728 | void *start, unsigned long len) | ||
729 | { | ||
730 | struct dwarf_fde *fde; | ||
731 | struct dwarf_cie *cie; | ||
732 | unsigned long flags; | ||
733 | int count; | ||
734 | void *p = start; | ||
735 | |||
736 | fde = kzalloc(sizeof(*fde), GFP_KERNEL); | ||
737 | if (!fde) | ||
738 | return -ENOMEM; | ||
739 | |||
740 | fde->length = len; | ||
741 | |||
742 | /* | ||
743 | * In a .eh_frame section the CIE pointer is the | ||
744 | * delta between the address within the FDE | ||
745 | */ | ||
746 | fde->cie_pointer = (unsigned long)(p - entry_type - 4); | ||
747 | |||
748 | cie = dwarf_lookup_cie(fde->cie_pointer); | ||
749 | fde->cie = cie; | ||
750 | |||
751 | if (cie->encoding) | ||
752 | count = dwarf_read_encoded_value(p, &fde->initial_location, | ||
753 | cie->encoding); | ||
754 | else | ||
755 | count = dwarf_read_addr(p, &fde->initial_location); | ||
756 | |||
757 | p += count; | ||
758 | |||
759 | if (cie->encoding) | ||
760 | count = dwarf_read_encoded_value(p, &fde->address_range, | ||
761 | cie->encoding & 0x0f); | ||
762 | else | ||
763 | count = dwarf_read_addr(p, &fde->address_range); | ||
764 | |||
765 | p += count; | ||
766 | |||
767 | if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) { | ||
768 | unsigned int length; | ||
769 | count = dwarf_read_uleb128(p, &length); | ||
770 | p += count + length; | ||
771 | } | ||
772 | |||
773 | /* Call frame instructions. */ | ||
774 | fde->instructions = p; | ||
775 | fde->end = start + len; | ||
776 | |||
777 | /* Add to list. */ | ||
778 | spin_lock_irqsave(&dwarf_fde_lock, flags); | ||
779 | list_add_tail(&fde->link, &dwarf_fde_list); | ||
780 | spin_unlock_irqrestore(&dwarf_fde_lock, flags); | ||
781 | |||
782 | return 0; | ||
783 | } | ||
784 | |||
785 | static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs, | ||
786 | unsigned long *sp, | ||
787 | const struct stacktrace_ops *ops, void *data) | ||
788 | { | ||
789 | struct dwarf_frame *frame; | ||
790 | |||
791 | frame = dwarf_unwind_stack(0, NULL); | ||
792 | |||
793 | while (frame && frame->return_addr) { | ||
794 | ops->address(data, frame->return_addr, 1); | ||
795 | frame = frame->next; | ||
796 | } | ||
797 | } | ||
798 | |||
/* Unwinder descriptor registered with the generic sh unwinder core. */
static struct unwinder dwarf_unwinder = {
	.name = "dwarf-unwinder",
	.dump = dwarf_unwinder_dump,
	.rating = 150,
};
804 | |||
805 | static void dwarf_unwinder_cleanup(void) | ||
806 | { | ||
807 | struct dwarf_cie *cie, *m; | ||
808 | struct dwarf_fde *fde, *n; | ||
809 | unsigned long flags; | ||
810 | |||
811 | /* | ||
812 | * Deallocate all the memory allocated for the DWARF unwinder. | ||
813 | * Traverse all the FDE/CIE lists and remove and free all the | ||
814 | * memory associated with those data structures. | ||
815 | */ | ||
816 | spin_lock_irqsave(&dwarf_cie_lock, flags); | ||
817 | list_for_each_entry_safe(cie, m, &dwarf_cie_list, link) | ||
818 | kfree(cie); | ||
819 | spin_unlock_irqrestore(&dwarf_cie_lock, flags); | ||
820 | |||
821 | spin_lock_irqsave(&dwarf_fde_lock, flags); | ||
822 | list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) | ||
823 | kfree(fde); | ||
824 | spin_unlock_irqrestore(&dwarf_fde_lock, flags); | ||
825 | } | ||
826 | |||
827 | /** | ||
828 | * dwarf_unwinder_init - initialise the dwarf unwinder | ||
829 | * | ||
830 | * Build the data structures describing the .dwarf_frame section to | ||
831 | * make it easier to lookup CIE and FDE entries. Because the | ||
832 | * .eh_frame section is packed as tightly as possible it is not | ||
833 | * easy to lookup the FDE for a given PC, so we build a list of FDE | ||
834 | * and CIE entries that make it easier. | ||
835 | */ | ||
836 | void dwarf_unwinder_init(void) | ||
837 | { | ||
838 | u32 entry_type; | ||
839 | void *p, *entry; | ||
840 | int count, err; | ||
841 | unsigned long len; | ||
842 | unsigned int c_entries, f_entries; | ||
843 | unsigned char *end; | ||
844 | INIT_LIST_HEAD(&dwarf_cie_list); | ||
845 | INIT_LIST_HEAD(&dwarf_fde_list); | ||
846 | |||
847 | c_entries = 0; | ||
848 | f_entries = 0; | ||
849 | entry = &__start_eh_frame; | ||
850 | |||
851 | while ((char *)entry < __stop_eh_frame) { | ||
852 | p = entry; | ||
853 | |||
854 | count = dwarf_entry_len(p, &len); | ||
855 | if (count == 0) { | ||
856 | /* | ||
857 | * We read a bogus length field value. There is | ||
858 | * nothing we can do here apart from disabling | ||
859 | * the DWARF unwinder. We can't even skip this | ||
860 | * entry and move to the next one because 'len' | ||
861 | * tells us where our next entry is. | ||
862 | */ | ||
863 | goto out; | ||
864 | } else | ||
865 | p += count; | ||
866 | |||
867 | /* initial length does not include itself */ | ||
868 | end = p + len; | ||
869 | |||
870 | entry_type = get_unaligned((u32 *)p); | ||
871 | p += 4; | ||
872 | |||
873 | if (entry_type == DW_EH_FRAME_CIE) { | ||
874 | err = dwarf_parse_cie(entry, p, len, end); | ||
875 | if (err < 0) | ||
876 | goto out; | ||
877 | else | ||
878 | c_entries++; | ||
879 | } else { | ||
880 | err = dwarf_parse_fde(entry, entry_type, p, len); | ||
881 | if (err < 0) | ||
882 | goto out; | ||
883 | else | ||
884 | f_entries++; | ||
885 | } | ||
886 | |||
887 | entry = (char *)entry + len + 4; | ||
888 | } | ||
889 | |||
890 | printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n", | ||
891 | c_entries, f_entries); | ||
892 | |||
893 | err = unwinder_register(&dwarf_unwinder); | ||
894 | if (err) | ||
895 | goto out; | ||
896 | |||
897 | return; | ||
898 | |||
899 | out: | ||
900 | printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err); | ||
901 | dwarf_unwinder_cleanup(); | ||
902 | } | ||