commit fa9d3b4da55fc8949efc2b4220d93f188fbcebd6
tree   ead3e046b7e6e101d1d27db36116c52405d43c36 /arch/sh/kernel
parent c01f0f1a4a96eb3acc5850e18cc43f24366966d0
parent 74db2479c1fecefd0a190f282f28f00565309807
author    Paul Mundt <lethal@linux-sh.org>  2009-08-21 16:37:14 -0400
committer Paul Mundt <lethal@linux-sh.org>  2009-08-21 16:37:14 -0400

Merge branch 'sh/dwarf-unwinder'

Conflicts:
	arch/sh/kernel/cpu/sh3/entry.S
Diffstat (limited to 'arch/sh/kernel'):

 arch/sh/kernel/cpu/sh3/entry.S |   2
 arch/sh/kernel/dwarf.c         | 278
 arch/sh/kernel/irq.c           |   4
 arch/sh/kernel/traps.c         |  24
 arch/sh/kernel/traps_32.c      |   1
 arch/sh/kernel/unwinder.c      |  16

 6 files changed, 213 insertions(+), 112 deletions(-)
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 272636ec5c98..8c19e21847d7 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -508,6 +508,8 @@ ENTRY(handle_interrupt)
 	bsr	save_regs	! needs original pr value in k3
 	mov	#-1, k2		! default vector kept in k2
 
+	setup_frame_reg
+
 	stc	sr, r0		! get status register
 	shlr2	r0
 	and	#0x3c, r0
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 5fd6e604816d..577302f31e6a 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -11,12 +11,14 @@
  *
  * TODO:
  *	- DWARF64 doesn't work.
+ *	- Registers with DWARF_VAL_OFFSET rules aren't handled properly.
  */
 
 /* #define DEBUG */
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/list.h>
+#include <linux/mempool.h>
 #include <linux/mm.h>
 #include <asm/dwarf.h>
 #include <asm/unwinder.h>
@@ -25,55 +27,89 @@
 #include <asm/dwarf.h>
 #include <asm/stacktrace.h>
 
+/* Reserve enough memory for two stack frames */
+#define DWARF_FRAME_MIN_REQ	2
+/* ... with 4 registers per frame. */
+#define DWARF_REG_MIN_REQ	(DWARF_FRAME_MIN_REQ * 4)
+
+static struct kmem_cache *dwarf_frame_cachep;
+static mempool_t *dwarf_frame_pool;
+
+static struct kmem_cache *dwarf_reg_cachep;
+static mempool_t *dwarf_reg_pool;
+
 static LIST_HEAD(dwarf_cie_list);
-DEFINE_SPINLOCK(dwarf_cie_lock);
+static DEFINE_SPINLOCK(dwarf_cie_lock);
 
 static LIST_HEAD(dwarf_fde_list);
-DEFINE_SPINLOCK(dwarf_fde_lock);
+static DEFINE_SPINLOCK(dwarf_fde_lock);
 
 static struct dwarf_cie *cached_cie;
 
-/*
- * Figure out whether we need to allocate some dwarf registers. If dwarf
- * registers have already been allocated then we may need to realloc
- * them. "reg" is a register number that we need to be able to access
- * after this call.
- *
- * Register numbers start at zero, therefore we need to allocate space
- * for "reg" + 1 registers.
- */
-static void dwarf_frame_alloc_regs(struct dwarf_frame *frame,
-				   unsigned int reg)
+/**
+ * dwarf_frame_alloc_reg - allocate memory for a DWARF register
+ * @frame: the DWARF frame whose list of registers we insert on
+ * @reg_num: the register number
+ *
+ * Allocate space for, and initialise, a dwarf reg from
+ * dwarf_reg_pool and insert it onto the (unsorted) linked-list of
+ * dwarf registers for @frame.
+ *
+ * Return the initialised DWARF reg.
+ */
+static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
+					       unsigned int reg_num)
 {
-	struct dwarf_reg *regs;
-	unsigned int num_regs = reg + 1;
-	size_t new_size;
-	size_t old_size;
-
-	new_size = num_regs * sizeof(*regs);
-	old_size = frame->num_regs * sizeof(*regs);
+	struct dwarf_reg *reg;
 
-	/* Fast path: don't allocate any regs if we've already got enough. */
-	if (frame->num_regs >= num_regs)
-		return;
-
-	regs = kzalloc(new_size, GFP_ATOMIC);
-	if (!regs) {
-		printk(KERN_WARNING "Unable to allocate DWARF registers\n");
+	reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
+	if (!reg) {
+		printk(KERN_WARNING "Unable to allocate a DWARF register\n");
 		/*
 		 * Let's just bomb hard here, we have no way to
 		 * gracefully recover.
 		 */
-		BUG();
+		UNWINDER_BUG();
 	}
 
-	if (frame->regs) {
-		memcpy(regs, frame->regs, old_size);
-		kfree(frame->regs);
+	reg->number = reg_num;
+	reg->addr = 0;
+	reg->flags = 0;
+
+	list_add(&reg->link, &frame->reg_list);
+
+	return reg;
+}
+
+static void dwarf_frame_free_regs(struct dwarf_frame *frame)
+{
+	struct dwarf_reg *reg, *n;
+
+	list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
+		list_del(&reg->link);
+		mempool_free(reg, dwarf_reg_pool);
 	}
+}
 
-	frame->regs = regs;
-	frame->num_regs = num_regs;
+/**
+ * dwarf_frame_reg - return a DWARF register
+ * @frame: the DWARF frame to search in for @reg_num
+ * @reg_num: the register number to search for
+ *
+ * Lookup and return the dwarf reg @reg_num for this frame. Return
+ * NULL if @reg_num is an invalid register number.
+ */
+static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
+					 unsigned int reg_num)
+{
+	struct dwarf_reg *reg;
+
+	list_for_each_entry(reg, &frame->reg_list, link) {
+		if (reg->number == reg_num)
+			return reg;
+	}
+
+	return NULL;
 }
 
 /**
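The register bookkeeping switches here from a kzalloc'd flat array, grown realloc-style with one slot per register number, to a per-frame linked list fed from a mempool, so the unwinder no longer depends on kzalloc() succeeding in atomic context. The struct definitions live in arch/sh/include/asm/dwarf.h and are not part of this hunk; the sketch below shows only the fields these functions rely on, and the exact layout is an assumption:

struct dwarf_reg {
	struct list_head link;		/* entry in frame->reg_list */
	unsigned int number;		/* DWARF register number */
	unsigned long addr;		/* rule operand, e.g. offset from the CFA */
	unsigned long flags;		/* DWARF_REG_OFFSET, DWARF_VAL_OFFSET, ... */
};

struct dwarf_frame {
	struct dwarf_frame *prev;	/* next-older frame on the callstack */
	struct list_head reg_list;	/* unsorted dwarf_reg list for this frame */
	unsigned long cfa;		/* canonical frame address */
	unsigned long return_addr;
	unsigned long flags;		/* CFA rule type, e.g. DWARF_FRAME_CFA_REG_OFFSET */
	/* ... cfa_register/cfa_offset rule state elided ... */
};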
@@ -196,7 +232,7 @@ static int dwarf_read_encoded_value(char *addr, unsigned long *val,
 		break;
 	default:
 		pr_debug("encoding=0x%x\n", (encoding & 0x70));
-		BUG();
+		UNWINDER_BUG();
 	}
 
 	if ((encoding & 0x07) == 0x00)
@@ -211,7 +247,7 @@ static int dwarf_read_encoded_value(char *addr, unsigned long *val,
 		break;
 	default:
 		pr_debug("encoding=0x%x\n", encoding);
-		BUG();
+		UNWINDER_BUG();
 	}
 
 	return count;
@@ -264,7 +300,7 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len)
  */
 static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
 {
-	struct dwarf_cie *cie, *n;
+	struct dwarf_cie *cie;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dwarf_cie_lock, flags);
@@ -278,7 +314,7 @@ static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
 		goto out;
 	}
 
-	list_for_each_entry_safe(cie, n, &dwarf_cie_list, link) {
+	list_for_each_entry(cie, &dwarf_cie_list, link) {
 		if (cie->cie_pointer == cie_ptr) {
 			cached_cie = cie;
 			break;
@@ -299,11 +335,12 @@ out:
  */
 struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
 {
+	struct dwarf_fde *fde;
 	unsigned long flags;
-	struct dwarf_fde *fde, *n;
 
 	spin_lock_irqsave(&dwarf_fde_lock, flags);
-	list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) {
+
+	list_for_each_entry(fde, &dwarf_fde_list, link) {
 		unsigned long start, end;
 
 		start = fde->initial_location;
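Dropping the *_safe iterators in dwarf_lookup_cie() and dwarf_lookup_fde() is correct because these loops only read the lists. The _safe variant exists for loops that unlink the entry they are standing on, as dwarf_frame_free_regs() above does: it caches the next pointer before the current entry can be freed. In miniature (pc_within() is a hypothetical helper standing in for the range check):

	/* Read-only walk: the plain iterator suffices. */
	list_for_each_entry(fde, &dwarf_fde_list, link) {
		if (pc_within(fde, pc))
			return fde;
	}

	/* Destructive walk: the _safe form is required, since the
	 * loop body frees the current entry. */
	list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
		list_del(&reg->link);
		mempool_free(reg, dwarf_reg_pool);
	}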
@@ -346,6 +383,7 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 	unsigned char insn;
 	unsigned char *current_insn;
 	unsigned int count, delta, reg, expr_len, offset;
+	struct dwarf_reg *regp;
 
 	current_insn = insn_start;
 
@@ -368,9 +406,9 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 			count = dwarf_read_uleb128(current_insn, &offset);
 			current_insn += count;
 			offset *= cie->data_alignment_factor;
-			dwarf_frame_alloc_regs(frame, reg);
-			frame->regs[reg].addr = offset;
-			frame->regs[reg].flags |= DWARF_REG_OFFSET;
+			regp = dwarf_frame_alloc_reg(frame, reg);
+			regp->addr = offset;
+			regp->flags |= DWARF_REG_OFFSET;
 			continue;
 			/* NOTREACHED */
 		case DW_CFA_restore:
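Nearly every opcode in dwarf_cfa_execute_insns() pulls its operands out of the instruction stream with dwarf_read_uleb128()/dwarf_read_leb128(). For reference, a generic unsigned-LEB128 decoder looks roughly like this; it is a sketch of the DWARF encoding, not a copy of the kernel's implementation:

/* Decode an unsigned LEB128 value: 7 data bits per byte,
 * least-significant group first, high bit set on every byte
 * except the last.  Returns the number of bytes consumed. */
static int uleb128_decode(const unsigned char *p, unsigned int *val)
{
	unsigned int result = 0, shift = 0;
	int count = 0;
	unsigned char byte;

	do {
		byte = p[count++];
		result |= (unsigned int)(byte & 0x7f) << shift;
		shift += 7;
	} while (byte & 0x80);

	*val = result;
	return count;
}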
@@ -414,6 +452,8 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 		case DW_CFA_undefined:
 			count = dwarf_read_uleb128(current_insn, &reg);
 			current_insn += count;
+			regp = dwarf_frame_alloc_reg(frame, reg);
+			regp->flags |= DWARF_UNDEFINED;
 			break;
 		case DW_CFA_def_cfa:
 			count = dwarf_read_uleb128(current_insn,
@@ -452,17 +492,18 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 			count = dwarf_read_leb128(current_insn, &offset);
 			current_insn += count;
 			offset *= cie->data_alignment_factor;
-			dwarf_frame_alloc_regs(frame, reg);
-			frame->regs[reg].flags |= DWARF_REG_OFFSET;
-			frame->regs[reg].addr = offset;
+			regp = dwarf_frame_alloc_reg(frame, reg);
+			regp->flags |= DWARF_REG_OFFSET;
+			regp->addr = offset;
 			break;
 		case DW_CFA_val_offset:
 			count = dwarf_read_uleb128(current_insn, &reg);
 			current_insn += count;
 			count = dwarf_read_leb128(current_insn, &offset);
 			offset *= cie->data_alignment_factor;
-			frame->regs[reg].flags |= DWARF_REG_OFFSET;
-			frame->regs[reg].addr = offset;
+			regp = dwarf_frame_alloc_reg(frame, reg);
+			regp->flags |= DWARF_VAL_OFFSET;
+			regp->addr = offset;
 			break;
 		case DW_CFA_GNU_args_size:
 			count = dwarf_read_uleb128(current_insn, &offset);
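The DW_CFA_val_offset case also picks up a semantic fix: it now records DWARF_VAL_OFFSET rather than DWARF_REG_OFFSET. Per the DWARF spec the two rules differ when applied: offset(N) means the register's old value was saved in memory at CFA + N, while val_offset(N) means the old value is CFA + N itself, with no memory load. Applying them would look roughly like the sketch below; roughly, because the TODO at the top of the file notes VAL_OFFSET rules still aren't consumed properly:

	switch (reg->flags) {
	case DWARF_REG_OFFSET:
		/* offset(N): saved value lives in memory at CFA + N */
		value = __raw_readl(frame->cfa + reg->addr);
		break;
	case DWARF_VAL_OFFSET:
		/* val_offset(N): the computed address is the value */
		value = frame->cfa + reg->addr;
		break;
	}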
@@ -473,12 +514,14 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 			current_insn += count;
 			count = dwarf_read_uleb128(current_insn, &offset);
 			offset *= cie->data_alignment_factor;
-			dwarf_frame_alloc_regs(frame, reg);
-			frame->regs[reg].flags |= DWARF_REG_OFFSET;
-			frame->regs[reg].addr = -offset;
+
+			regp = dwarf_frame_alloc_reg(frame, reg);
+			regp->flags |= DWARF_REG_OFFSET;
+			regp->addr = -offset;
 			break;
 		default:
 			pr_debug("unhandled DWARF instruction 0x%x\n", insn);
+			UNWINDER_BUG();
 			break;
 		}
 	}
@@ -495,14 +538,14 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
  * on the callstack. Each of the lower (older) stack frames are
  * linked via the "prev" member.
  */
-struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
-				       struct dwarf_frame *prev)
+struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
+					struct dwarf_frame *prev)
 {
 	struct dwarf_frame *frame;
 	struct dwarf_cie *cie;
 	struct dwarf_fde *fde;
+	struct dwarf_reg *reg;
 	unsigned long addr;
-	int i, offset;
 
 	/*
 	 * If this is the first invocation of this recursive function we
@@ -515,11 +558,16 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
 	if (!pc && !prev)
 		pc = (unsigned long)current_text_addr();
 
-	frame = kzalloc(sizeof(*frame), GFP_ATOMIC);
-	if (!frame)
-		return NULL;
+	frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
+	if (!frame) {
+		printk(KERN_ERR "Unable to allocate a dwarf frame\n");
+		UNWINDER_BUG();
+	}
 
+	INIT_LIST_HEAD(&frame->reg_list);
+	frame->flags = 0;
 	frame->prev = prev;
+	frame->return_addr = 0;
 
 	fde = dwarf_lookup_fde(pc);
 	if (!fde) {
@@ -539,7 +587,7 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
 		 * case above, which sucks because we could print a
 		 * warning here.
 		 */
-		return NULL;
+		goto bail;
 	}
 
 	cie = dwarf_lookup_cie(fde->cie_pointer);
@@ -559,10 +607,11 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
 	switch (frame->flags) {
 	case DWARF_FRAME_CFA_REG_OFFSET:
 		if (prev) {
-			BUG_ON(!prev->regs[frame->cfa_register].flags);
+			reg = dwarf_frame_reg(prev, frame->cfa_register);
+			UNWINDER_BUG_ON(!reg);
+			UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
 
-			addr = prev->cfa;
-			addr += prev->regs[frame->cfa_register].addr;
+			addr = prev->cfa + reg->addr;
 			frame->cfa = __raw_readl(addr);
 
 		} else {
@@ -579,27 +628,30 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
 		frame->cfa += frame->cfa_offset;
 		break;
 	default:
-		BUG();
+		UNWINDER_BUG();
 	}
 
-	/* If we haven't seen the return address reg, we're screwed. */
-	BUG_ON(!frame->regs[DWARF_ARCH_RA_REG].flags);
+	reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);
 
-	for (i = 0; i <= frame->num_regs; i++) {
-		struct dwarf_reg *reg = &frame->regs[i];
-
-		if (!reg->flags)
-			continue;
-
-		offset = reg->addr;
-		offset += frame->cfa;
-	}
-
-	addr = frame->cfa + frame->regs[DWARF_ARCH_RA_REG].addr;
+	/*
+	 * If we haven't seen the return address register or the return
+	 * address column is undefined then we must assume that this is
+	 * the end of the callstack.
+	 */
+	if (!reg || reg->flags == DWARF_UNDEFINED)
+		goto bail;
+
+	UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
+
+	addr = frame->cfa + reg->addr;
 	frame->return_addr = __raw_readl(addr);
 
-	frame->next = dwarf_unwind_stack(frame->return_addr, frame);
 	return frame;
+
+bail:
+	dwarf_frame_free_regs(frame);
+	mempool_free(frame, dwarf_frame_pool);
+	return NULL;
 }
 
 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
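The bail path pairs dwarf_frame_free_regs() with mempool_free() to return a frame and all of its registers to their pools; the same pairing reappears in the dump loop later in this patch. A hypothetical helper that a follow-up cleanup could introduce (not part of this patch):

static void dwarf_free_frame(struct dwarf_frame *frame)
{
	/* release the frame's register list, then the frame itself */
	dwarf_frame_free_regs(frame);
	mempool_free(frame, dwarf_frame_pool);
}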
@@ -624,7 +676,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 	cie->cie_pointer = (unsigned long)entry;
 
 	cie->version = *(char *)p++;
-	BUG_ON(cie->version != 1);
+	UNWINDER_BUG_ON(cie->version != 1);
 
 	cie->augmentation = p;
 	p += strlen(cie->augmentation) + 1;
@@ -654,7 +706,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 	count = dwarf_read_uleb128(p, &length);
 	p += count;
 
-	BUG_ON((unsigned char *)p > end);
+	UNWINDER_BUG_ON((unsigned char *)p > end);
 
 	cie->initial_instructions = p + length;
 	cie->augmentation++;
@@ -682,16 +734,16 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 				 * routine in the CIE
 				 * augmentation.
 				 */
-				BUG();
+				UNWINDER_BUG();
 			} else if (*cie->augmentation == 'S') {
-				BUG();
+				UNWINDER_BUG();
 			} else {
 				/*
 				 * Unknown augmentation. Assume
 				 * 'z' augmentation.
 				 */
 				p = cie->initial_instructions;
-				BUG_ON(!p);
+				UNWINDER_BUG_ON(!p);
 				break;
 			}
 		}
@@ -708,7 +760,8 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 }
 
 static int dwarf_parse_fde(void *entry, u32 entry_type,
-			   void *start, unsigned long len)
+			   void *start, unsigned long len,
+			   unsigned char *end)
 {
 	struct dwarf_fde *fde;
 	struct dwarf_cie *cie;
@@ -755,7 +808,7 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
 
 	/* Call frame instructions. */
 	fde->instructions = p;
-	fde->end = start + len;
+	fde->end = end;
 
 	/* Add to list. */
 	spin_lock_irqsave(&dwarf_fde_lock, flags);
@@ -765,17 +818,33 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
 	return 0;
 }
 
-static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs,
+static void dwarf_unwinder_dump(struct task_struct *task,
+				struct pt_regs *regs,
 				unsigned long *sp,
-				const struct stacktrace_ops *ops, void *data)
+				const struct stacktrace_ops *ops,
+				void *data)
 {
-	struct dwarf_frame *frame;
+	struct dwarf_frame *frame, *_frame;
+	unsigned long return_addr;
 
-	frame = dwarf_unwind_stack(0, NULL);
+	_frame = NULL;
+	return_addr = 0;
 
-	while (frame && frame->return_addr) {
-		ops->address(data, frame->return_addr, 1);
-		frame = frame->next;
+	while (1) {
+		frame = dwarf_unwind_stack(return_addr, _frame);
+
+		if (_frame) {
+			dwarf_frame_free_regs(_frame);
+			mempool_free(_frame, dwarf_frame_pool);
+		}
+
+		_frame = frame;
+
+		if (!frame || !frame->return_addr)
+			break;
+
+		return_addr = frame->return_addr;
+		ops->address(data, return_addr, 1);
 	}
 }
 
@@ -787,24 +856,22 @@ static struct unwinder dwarf_unwinder = {
 
 static void dwarf_unwinder_cleanup(void)
 {
-	struct dwarf_cie *cie, *m;
-	struct dwarf_fde *fde, *n;
-	unsigned long flags;
+	struct dwarf_cie *cie;
+	struct dwarf_fde *fde;
 
 	/*
 	 * Deallocate all the memory allocated for the DWARF unwinder.
 	 * Traverse all the FDE/CIE lists and remove and free all the
 	 * memory associated with those data structures.
 	 */
-	spin_lock_irqsave(&dwarf_cie_lock, flags);
-	list_for_each_entry_safe(cie, m, &dwarf_cie_list, link)
+	list_for_each_entry(cie, &dwarf_cie_list, link)
 		kfree(cie);
-	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
 
-	spin_lock_irqsave(&dwarf_fde_lock, flags);
-	list_for_each_entry_safe(fde, n, &dwarf_fde_list, link)
+	list_for_each_entry(fde, &dwarf_fde_list, link)
 		kfree(fde);
-	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+
+	kmem_cache_destroy(dwarf_reg_cachep);
+	kmem_cache_destroy(dwarf_frame_cachep);
 }
 
 /**
@@ -816,7 +883,7 @@ static void dwarf_unwinder_cleanup(void)
  * easy to lookup the FDE for a given PC, so we build a list of FDE
  * and CIE entries that make it easier.
  */
-void dwarf_unwinder_init(void)
+static int __init dwarf_unwinder_init(void)
 {
 	u32 entry_type;
 	void *p, *entry;
@@ -831,6 +898,21 @@ void dwarf_unwinder_init(void)
 	f_entries = 0;
 	entry = &__start_eh_frame;
 
+	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
+			sizeof(struct dwarf_frame), 0, SLAB_PANIC, NULL);
+	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
+			sizeof(struct dwarf_reg), 0, SLAB_PANIC, NULL);
+
+	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
+					  mempool_alloc_slab,
+					  mempool_free_slab,
+					  dwarf_frame_cachep);
+
+	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
+					mempool_alloc_slab,
+					mempool_free_slab,
+					dwarf_reg_cachep);
+
 	while ((char *)entry < __stop_eh_frame) {
 		p = entry;
 
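The slab caches provide fast allocation; the mempools layered on top guarantee that DWARF_FRAME_MIN_REQ frames and DWARF_REG_MIN_REQ registers remain allocatable even when the page allocator has nothing left, which matters because the unwinder is most valuable on OOM and panic paths, exactly where a bare kzalloc(GFP_ATOMIC) fails. The API pairing in miniature (a usage sketch of the stock mempool interface, not new code from this patch):

	/* Create: pre-reserve min_nr objects, backed by a slab cache. */
	pool = mempool_create(min_nr, mempool_alloc_slab,
			      mempool_free_slab, cachep);

	/* Allocate: tries the slab first, falls back to the reserve.
	 * GFP_ATOMIC since the unwinder may run in interrupt context. */
	obj = mempool_alloc(pool, GFP_ATOMIC);

	/* Free: tops the reserve back up before returning to the slab. */
	mempool_free(obj, pool);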
@@ -860,7 +942,7 @@ void dwarf_unwinder_init(void)
 			else
 				c_entries++;
 		} else {
-			err = dwarf_parse_fde(entry, entry_type, p, len);
+			err = dwarf_parse_fde(entry, entry_type, p, len, end);
 			if (err < 0)
 				goto out;
 			else
@@ -877,9 +959,11 @@ void dwarf_unwinder_init(void)
 	if (err)
 		goto out;
 
-	return;
+	return 0;
 
 out:
 	printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
 	dwarf_unwinder_cleanup();
+	return -EINVAL;
 }
+early_initcall(dwarf_unwinder_init);
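Turning dwarf_unwinder_init() into an early_initcall() replaces the hand-placed call in init_IRQ() (removed in the irq.c hunk below) with a well-defined boot slot. A sketch of the mechanism, as the generic initcall machinery works:

/* early_initcall() drops a pointer to the function into the
 * .initcallearly.init section; the kernel runs that section from
 * do_pre_smp_initcalls(), before the numbered initcall levels, so
 * the unwinder is available for most of boot. */
static int __init dwarf_unwinder_init(void);
early_initcall(dwarf_unwinder_init);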
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 2bb43dc74f22..278c68c60488 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -14,7 +14,6 @@
 #include <asm/processor.h>
 #include <asm/machvec.h>
 #include <asm/uaccess.h>
-#include <asm/dwarf.h>
 #include <asm/thread_info.h>
 #include <cpu/mmu_context.h>
 
@@ -262,9 +261,6 @@ void __init init_IRQ(void)
 	sh_mv.mv_init_irq();
 
 	irq_ctx_init(smp_processor_id());
-
-	/* This needs to be early, but not too early.. */
-	dwarf_unwinder_init();
 }
 
 #ifdef CONFIG_SPARSE_IRQ
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index b3e0067db358..f69bd968fcca 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -5,18 +5,32 @@
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/uaccess.h>
+#include <asm/unwinder.h>
 #include <asm/system.h>
 
 #ifdef CONFIG_BUG
-static void handle_BUG(struct pt_regs *regs)
+void handle_BUG(struct pt_regs *regs)
 {
+	const struct bug_entry *bug;
+	unsigned long bugaddr = regs->pc;
 	enum bug_trap_type tt;
-	tt = report_bug(regs->pc, regs);
+
+	if (!is_valid_bugaddr(bugaddr))
+		goto invalid;
+
+	bug = find_bug(bugaddr);
+
+	/* Switch unwinders when unwind_stack() is called */
+	if (bug->flags & BUGFLAG_UNWINDER)
+		unwinder_faulted = 1;
+
+	tt = report_bug(bugaddr, regs);
 	if (tt == BUG_TRAP_TYPE_WARN) {
-		regs->pc += instruction_size(regs->pc);
+		regs->pc += instruction_size(bugaddr);
 		return;
 	}
 
+invalid:
 	die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
 }
 
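handle_BUG() can now tell an ordinary BUG() from a fault raised by the unwinder itself: UNWINDER_BUG() (defined in the sh asm/bug.h, outside this diff) presumably plants a trapa-based bug_entry tagged with BUGFLAG_UNWINDER, so the trap handler flags the unwinder as broken instead of killing the kernel while it is trying to print a backtrace. Conceptually, with the flag value and macro shape being assumptions modelled on the generic BUG machinery (__EMIT_BUG is a stand-in for the arch's bug-entry-emitting asm):

/* Each BUG site is recorded as a struct bug_entry (trap address +
 * flags).  An arch-private flag marks unwinder-internal bugs. */
#define BUGFLAG_UNWINDER	(1 << 1)	/* illustrative value */

#define UNWINDER_BUG()						\
do {								\
	/* like BUG(), but the emitted bug_entry carries	\
	 * BUGFLAG_UNWINDER so handle_BUG() can see it */	\
	__EMIT_BUG(BUGFLAG_UNWINDER);				\
} while (0)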
@@ -28,8 +42,10 @@ int is_valid_bugaddr(unsigned long addr)
 		return 0;
 	if (probe_kernel_address((insn_size_t *)addr, opcode))
 		return 0;
+	if (opcode == TRAPA_BUG_OPCODE)
+		return 1;
 
-	return opcode == TRAPA_BUG_OPCODE;
+	return 0;
 }
 #endif
 
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 563426487c6b..05a04b6df844 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -136,6 +136,7 @@ static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
 			regs->pc = fixup->fixup;
 			return;
 		}
+
 		die(str, regs, err);
 	}
 }
diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c
index 2b30fa28b440..468889d958f4 100644
--- a/arch/sh/kernel/unwinder.c
+++ b/arch/sh/kernel/unwinder.c
@@ -11,6 +11,7 @@
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
+#include <linux/module.h>
 #include <asm/unwinder.h>
 #include <asm/atomic.h>
 
@@ -53,8 +54,6 @@ static struct list_head unwinder_list = {
 
 static DEFINE_SPINLOCK(unwinder_lock);
 
-static atomic_t unwinder_running = ATOMIC_INIT(0);
-
 /**
  * select_unwinder - Select the best registered stack unwinder.
  *
@@ -122,6 +121,8 @@ int unwinder_register(struct unwinder *u)
 	return ret;
 }
 
+int unwinder_faulted = 0;
+
 /*
  * Unwind the call stack and pass information to the stacktrace_ops
  * functions. Also handle the case where we need to switch to a new
@@ -144,19 +145,20 @@ void unwind_stack(struct task_struct *task, struct pt_regs *regs,
 	 * Hopefully this will give us a semi-reliable stacktrace so we
 	 * can diagnose why curr_unwinder->dump() faulted.
 	 */
-	if (atomic_inc_return(&unwinder_running) != 1) {
+	if (unwinder_faulted) {
 		spin_lock_irqsave(&unwinder_lock, flags);
 
-		if (!list_is_singular(&unwinder_list)) {
+		/* Make sure no one beat us to changing the unwinder */
+		if (unwinder_faulted && !list_is_singular(&unwinder_list)) {
 			list_del(&curr_unwinder->list);
 			curr_unwinder = select_unwinder();
+
+			unwinder_faulted = 0;
 		}
 
 		spin_unlock_irqrestore(&unwinder_lock, flags);
-		atomic_dec(&unwinder_running);
 	}
 
 	curr_unwinder->dump(task, regs, sp, ops, data);
-
-	atomic_dec(&unwinder_running);
 }
+EXPORT_SYMBOL_GPL(unwind_stack);
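With the atomic unwinder_running counter gone, recovery is driven by the explicit flag: UNWINDER_BUG() traps, handle_BUG() sets unwinder_faulted, and the next unwind_stack() call (typically the one printing the oops backtrace) unregisters the broken unwinder and re-runs select_unwinder(), which picks the remaining unwinder with the best rating. A sketch of how a lower-rated fallback coexists with the DWARF unwinder; the field names follow this file's struct unwinder usage, while the name, dump routine, and rating value are illustrative:

static struct unwinder simple_unwinder = {
	.name	= "simple-unwinder",
	.dump	= simple_unwinder_dump,	/* hypothetical dump routine */
	.rating	= 50,			/* below the DWARF unwinder */
};

/* Registered at boot; only becomes curr_unwinder if a better-rated
 * unwinder faults and gets unlinked by the logic above. */
unwinder_register(&simple_unwinder);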