Diffstat (limited to 'arch/sh/kernel/dwarf.c')
-rw-r--r--	arch/sh/kernel/dwarf.c	246
1 file changed, 166 insertions(+), 80 deletions(-)
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index d0652153f576..577302f31e6a 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -11,12 +11,14 @@
  *
  * TODO:
  * - DWARF64 doesn't work.
+ * - Registers with DWARF_VAL_OFFSET rules aren't handled properly.
  */
 
 /* #define DEBUG */
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/list.h>
+#include <linux/mempool.h>
 #include <linux/mm.h>
 #include <asm/dwarf.h>
 #include <asm/unwinder.h>
@@ -25,6 +27,17 @@
 #include <asm/dwarf.h>
 #include <asm/stacktrace.h>
 
+/* Reserve enough memory for two stack frames */
+#define DWARF_FRAME_MIN_REQ	2
+/* ... with 4 registers per frame. */
+#define DWARF_REG_MIN_REQ	(DWARF_FRAME_MIN_REQ * 4)
+
+static struct kmem_cache *dwarf_frame_cachep;
+static mempool_t *dwarf_frame_pool;
+
+static struct kmem_cache *dwarf_reg_cachep;
+static mempool_t *dwarf_reg_pool;
+
 static LIST_HEAD(dwarf_cie_list);
 static DEFINE_SPINLOCK(dwarf_cie_lock);
 
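The two slab caches and mempools declared above are created later in this patch, in dwarf_unwinder_init(). As a rough, self-contained sketch of the pattern being introduced (the names below are illustrative and not part of the patch): a mempool backed by a slab cache keeps a minimum number of objects in reserve, so the GFP_ATOMIC allocations made while unwinding can still succeed when the page allocator cannot be invoked.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/slab.h>

struct example_obj {
	unsigned long data;
};

static struct kmem_cache *example_cachep;
static mempool_t *example_pool;

static int __init example_pool_init(void)
{
	example_cachep = kmem_cache_create("example_objs",
			sizeof(struct example_obj), 0, SLAB_PANIC, NULL);

	/* Keep a reserve of two objects, as DWARF_FRAME_MIN_REQ does for frames. */
	example_pool = mempool_create(2, mempool_alloc_slab,
				      mempool_free_slab, example_cachep);
	return example_pool ? 0 : -ENOMEM;
}

mempool_alloc() dips into this reserve when the slab allocation fails; with GFP_ATOMIC it can still return NULL once the reserve is exhausted, which is why the allocation sites below keep their NULL checks.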
@@ -33,47 +46,70 @@ static DEFINE_SPINLOCK(dwarf_fde_lock);
 
 static struct dwarf_cie *cached_cie;
 
-/*
- * Figure out whether we need to allocate some dwarf registers. If dwarf
- * registers have already been allocated then we may need to realloc
- * them. "reg" is a register number that we need to be able to access
- * after this call.
- *
- * Register numbers start at zero, therefore we need to allocate space
- * for "reg" + 1 registers.
- */
-static void dwarf_frame_alloc_regs(struct dwarf_frame *frame,
-				   unsigned int reg)
+/**
+ * dwarf_frame_alloc_reg - allocate memory for a DWARF register
+ * @frame: the DWARF frame whose list of registers we insert on
+ * @reg_num: the register number
+ *
+ * Allocate space for, and initialise, a dwarf reg from
+ * dwarf_reg_pool and insert it onto the (unsorted) linked-list of
+ * dwarf registers for @frame.
+ *
+ * Return the initialised DWARF reg.
+ */
+static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
+					       unsigned int reg_num)
 {
-	struct dwarf_reg *regs;
-	unsigned int num_regs = reg + 1;
-	size_t new_size;
-	size_t old_size;
+	struct dwarf_reg *reg;
 
-	new_size = num_regs * sizeof(*regs);
-	old_size = frame->num_regs * sizeof(*regs);
-
-	/* Fast path: don't allocate any regs if we've already got enough. */
-	if (frame->num_regs >= num_regs)
-		return;
-
-	regs = kzalloc(new_size, GFP_ATOMIC);
-	if (!regs) {
-		printk(KERN_WARNING "Unable to allocate DWARF registers\n");
+	reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
+	if (!reg) {
+		printk(KERN_WARNING "Unable to allocate a DWARF register\n");
 		/*
 		 * Let's just bomb hard here, we have no way to
 		 * gracefully recover.
 		 */
-		BUG();
+		UNWINDER_BUG();
+	}
+
+	reg->number = reg_num;
+	reg->addr = 0;
+	reg->flags = 0;
+
+	list_add(&reg->link, &frame->reg_list);
+
+	return reg;
+}
+
+static void dwarf_frame_free_regs(struct dwarf_frame *frame)
+{
+	struct dwarf_reg *reg, *n;
+
+	list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
+		list_del(&reg->link);
+		mempool_free(reg, dwarf_reg_pool);
 	}
+}
+
+/**
+ * dwarf_frame_reg - return a DWARF register
+ * @frame: the DWARF frame to search in for @reg_num
+ * @reg_num: the register number to search for
+ *
+ * Lookup and return the dwarf reg @reg_num for this frame. Return
+ * NULL if @reg_num is an register invalid number.
+ */
+static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
+					 unsigned int reg_num)
+{
+	struct dwarf_reg *reg;
 
-	if (frame->regs) {
-		memcpy(regs, frame->regs, old_size);
-		kfree(frame->regs);
+	list_for_each_entry(reg, &frame->reg_list, link) {
+		if (reg->number == reg_num)
+			return reg;
 	}
 
-	frame->regs = regs;
-	frame->num_regs = num_regs;
+	return NULL;
 }
 
 /**
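The register bookkeeping above replaces the old reallocated array, indexed by register number, with a per-frame linked list whose nodes come from dwarf_reg_pool. A minimal user-space sketch of the same data-structure idea, using malloc()/free() in place of the mempool (hypothetical names, not kernel code):

/* Illustrative sketch only -- user-space analogue of the list-based registers. */
#include <stdio.h>
#include <stdlib.h>

struct reg {
	unsigned int number;
	unsigned long addr;
	struct reg *next;
};

struct frame {
	struct reg *regs;	/* head of the (unsorted) register list */
};

static struct reg *frame_alloc_reg(struct frame *f, unsigned int num)
{
	struct reg *r = calloc(1, sizeof(*r));

	if (!r)
		return NULL;
	r->number = num;
	r->next = f->regs;	/* push onto the list */
	f->regs = r;
	return r;
}

static struct reg *frame_reg(struct frame *f, unsigned int num)
{
	struct reg *r;

	for (r = f->regs; r; r = r->next)
		if (r->number == num)
			return r;
	return NULL;		/* register was never allocated */
}

static void frame_free_regs(struct frame *f)
{
	while (f->regs) {
		struct reg *r = f->regs;

		f->regs = r->next;
		free(r);
	}
}

int main(void)
{
	struct frame f = { 0 };
	struct reg *ra = frame_alloc_reg(&f, 14);	/* e.g. a return-address column */

	if (ra)
		ra->addr = 8;

	if (frame_reg(&f, 14))
		printf("reg 14 addr: %lu\n", frame_reg(&f, 14)->addr);

	frame_free_regs(&f);
	return 0;
}

Lookups are linear, but a frame only ever tracks a handful of registers, so the list avoids the old worst case of reallocating and zeroing an array sized by the largest register number seen.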
@@ -196,7 +232,7 @@ static int dwarf_read_encoded_value(char *addr, unsigned long *val,
 		break;
 	default:
 		pr_debug("encoding=0x%x\n", (encoding & 0x70));
-		BUG();
+		UNWINDER_BUG();
 	}
 
 	if ((encoding & 0x07) == 0x00)
@@ -211,7 +247,7 @@ static int dwarf_read_encoded_value(char *addr, unsigned long *val,
 		break;
 	default:
 		pr_debug("encoding=0x%x\n", encoding);
-		BUG();
+		UNWINDER_BUG();
 	}
 
 	return count;
@@ -347,6 +383,7 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 	unsigned char insn;
 	unsigned char *current_insn;
 	unsigned int count, delta, reg, expr_len, offset;
+	struct dwarf_reg *regp;
 
 	current_insn = insn_start;
 
@@ -369,9 +406,9 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 			count = dwarf_read_uleb128(current_insn, &offset);
 			current_insn += count;
 			offset *= cie->data_alignment_factor;
-			dwarf_frame_alloc_regs(frame, reg);
-			frame->regs[reg].addr = offset;
-			frame->regs[reg].flags |= DWARF_REG_OFFSET;
+			regp = dwarf_frame_alloc_reg(frame, reg);
+			regp->addr = offset;
+			regp->flags |= DWARF_REG_OFFSET;
 			continue;
 			/* NOTREACHED */
 		case DW_CFA_restore:
@@ -415,6 +452,8 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 		case DW_CFA_undefined:
 			count = dwarf_read_uleb128(current_insn, &reg);
 			current_insn += count;
+			regp = dwarf_frame_alloc_reg(frame, reg);
+			regp->flags |= DWARF_UNDEFINED;
 			break;
 		case DW_CFA_def_cfa:
 			count = dwarf_read_uleb128(current_insn,
@@ -453,17 +492,18 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 			count = dwarf_read_leb128(current_insn, &offset);
 			current_insn += count;
 			offset *= cie->data_alignment_factor;
-			dwarf_frame_alloc_regs(frame, reg);
-			frame->regs[reg].flags |= DWARF_REG_OFFSET;
-			frame->regs[reg].addr = offset;
+			regp = dwarf_frame_alloc_reg(frame, reg);
+			regp->flags |= DWARF_REG_OFFSET;
+			regp->addr = offset;
 			break;
 		case DW_CFA_val_offset:
 			count = dwarf_read_uleb128(current_insn, &reg);
 			current_insn += count;
 			count = dwarf_read_leb128(current_insn, &offset);
 			offset *= cie->data_alignment_factor;
-			frame->regs[reg].flags |= DWARF_REG_OFFSET;
-			frame->regs[reg].addr = offset;
+			regp = dwarf_frame_alloc_reg(frame, reg);
+			regp->flags |= DWARF_VAL_OFFSET;
+			regp->addr = offset;
 			break;
 		case DW_CFA_GNU_args_size:
 			count = dwarf_read_uleb128(current_insn, &offset);
@@ -474,12 +514,14 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 			current_insn += count;
 			count = dwarf_read_uleb128(current_insn, &offset);
 			offset *= cie->data_alignment_factor;
-			dwarf_frame_alloc_regs(frame, reg);
-			frame->regs[reg].flags |= DWARF_REG_OFFSET;
-			frame->regs[reg].addr = -offset;
+
+			regp = dwarf_frame_alloc_reg(frame, reg);
+			regp->flags |= DWARF_REG_OFFSET;
+			regp->addr = -offset;
 			break;
 		default:
 			pr_debug("unhandled DWARF instruction 0x%x\n", insn);
+			UNWINDER_BUG();
 			break;
 		}
 	}
@@ -496,14 +538,14 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
  * on the callstack. Each of the lower (older) stack frames are
  * linked via the "prev" member.
  */
-struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
+struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 				       struct dwarf_frame *prev)
 {
 	struct dwarf_frame *frame;
 	struct dwarf_cie *cie;
 	struct dwarf_fde *fde;
+	struct dwarf_reg *reg;
 	unsigned long addr;
-	int i, offset;
 
 	/*
 	 * If this is the first invocation of this recursive function we
@@ -516,11 +558,16 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
 	if (!pc && !prev)
 		pc = (unsigned long)current_text_addr();
 
-	frame = kzalloc(sizeof(*frame), GFP_ATOMIC);
-	if (!frame)
-		return NULL;
+	frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
+	if (!frame) {
+		printk(KERN_ERR "Unable to allocate a dwarf frame\n");
+		UNWINDER_BUG();
+	}
 
+	INIT_LIST_HEAD(&frame->reg_list);
+	frame->flags = 0;
 	frame->prev = prev;
+	frame->return_addr = 0;
 
 	fde = dwarf_lookup_fde(pc);
 	if (!fde) {
@@ -540,7 +587,7 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
 		 * case above, which sucks because we could print a
 		 * warning here.
 		 */
-		return NULL;
+		goto bail;
 	}
 
 	cie = dwarf_lookup_cie(fde->cie_pointer);
@@ -560,10 +607,11 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
 	switch (frame->flags) {
 	case DWARF_FRAME_CFA_REG_OFFSET:
 		if (prev) {
-			BUG_ON(!prev->regs[frame->cfa_register].flags);
+			reg = dwarf_frame_reg(prev, frame->cfa_register);
+			UNWINDER_BUG_ON(!reg);
+			UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
 
-			addr = prev->cfa;
-			addr += prev->regs[frame->cfa_register].addr;
+			addr = prev->cfa + reg->addr;
 			frame->cfa = __raw_readl(addr);
 
 		} else {
@@ -580,27 +628,30 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
 		frame->cfa += frame->cfa_offset;
 		break;
 	default:
-		BUG();
+		UNWINDER_BUG();
 	}
 
-	/* If we haven't seen the return address reg, we're screwed. */
-	BUG_ON(!frame->regs[DWARF_ARCH_RA_REG].flags);
+	reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);
 
-	for (i = 0; i <= frame->num_regs; i++) {
-		struct dwarf_reg *reg = &frame->regs[i];
+	/*
+	 * If we haven't seen the return address register or the return
+	 * address column is undefined then we must assume that this is
+	 * the end of the callstack.
+	 */
+	if (!reg || reg->flags == DWARF_UNDEFINED)
+		goto bail;
 
-		if (!reg->flags)
-			continue;
+	UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
 
-		offset = reg->addr;
-		offset += frame->cfa;
-	}
-
-	addr = frame->cfa + frame->regs[DWARF_ARCH_RA_REG].addr;
+	addr = frame->cfa + reg->addr;
 	frame->return_addr = __raw_readl(addr);
 
-	frame->next = dwarf_unwind_stack(frame->return_addr, frame);
 	return frame;
+
+bail:
+	dwarf_frame_free_regs(frame);
+	mempool_free(frame, dwarf_frame_pool);
+	return NULL;
 }
 
 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
@@ -625,7 +676,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 	cie->cie_pointer = (unsigned long)entry;
 
 	cie->version = *(char *)p++;
-	BUG_ON(cie->version != 1);
+	UNWINDER_BUG_ON(cie->version != 1);
 
 	cie->augmentation = p;
 	p += strlen(cie->augmentation) + 1;
@@ -655,7 +706,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 		count = dwarf_read_uleb128(p, &length);
 		p += count;
 
-		BUG_ON((unsigned char *)p > end);
+		UNWINDER_BUG_ON((unsigned char *)p > end);
 
 		cie->initial_instructions = p + length;
 		cie->augmentation++;
@@ -683,16 +734,16 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 			 * routine in the CIE
 			 * augmentation.
 			 */
-			BUG();
+			UNWINDER_BUG();
 		} else if (*cie->augmentation == 'S') {
-			BUG();
+			UNWINDER_BUG();
 		} else {
 			/*
 			 * Unknown augmentation. Assume
 			 * 'z' augmentation.
 			 */
 			p = cie->initial_instructions;
-			BUG_ON(!p);
+			UNWINDER_BUG_ON(!p);
 			break;
 		}
 	}
@@ -709,7 +760,8 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 }
 
 static int dwarf_parse_fde(void *entry, u32 entry_type,
-			   void *start, unsigned long len)
+			   void *start, unsigned long len,
+			   unsigned char *end)
 {
 	struct dwarf_fde *fde;
 	struct dwarf_cie *cie;
@@ -756,7 +808,7 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
 
 	/* Call frame instructions. */
 	fde->instructions = p;
-	fde->end = start + len;
+	fde->end = end;
 
 	/* Add to list. */
 	spin_lock_irqsave(&dwarf_fde_lock, flags);
@@ -766,17 +818,33 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
 	return 0;
 }
 
-static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs,
+static void dwarf_unwinder_dump(struct task_struct *task,
+				struct pt_regs *regs,
 				unsigned long *sp,
-				const struct stacktrace_ops *ops, void *data)
+				const struct stacktrace_ops *ops,
+				void *data)
 {
-	struct dwarf_frame *frame;
+	struct dwarf_frame *frame, *_frame;
+	unsigned long return_addr;
 
-	frame = dwarf_unwind_stack(0, NULL);
+	_frame = NULL;
+	return_addr = 0;
 
-	while (frame && frame->return_addr) {
-		ops->address(data, frame->return_addr, 1);
-		frame = frame->next;
+	while (1) {
+		frame = dwarf_unwind_stack(return_addr, _frame);
+
+		if (_frame) {
+			dwarf_frame_free_regs(_frame);
+			mempool_free(_frame, dwarf_frame_pool);
+		}
+
+		_frame = frame;
+
+		if (!frame || !frame->return_addr)
+			break;
+
+		return_addr = frame->return_addr;
+		ops->address(data, return_addr, 1);
 	}
 }
 
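The dump loop above no longer relies on dwarf_unwind_stack() recursing and chaining frames through a next pointer; the caller now requests one frame at a time and returns the previous frame's registers and frame object to the pools before moving on. A compile-only sketch of that caller-driven shape (illustrative types and names, not part of the patch):

/* Illustrative sketch only -- hypothetical types and callbacks. */
struct uframe {
	unsigned long return_addr;
	/* saved register state would live here */
};

static void walk_stack(struct uframe *(*unwind_one)(unsigned long pc,
						    struct uframe *prev),
		       void (*release)(struct uframe *f),
		       void (*report)(unsigned long addr))
{
	struct uframe *frame, *prev = NULL;
	unsigned long return_addr = 0;

	while (1) {
		frame = unwind_one(return_addr, prev);

		if (prev)
			release(prev);	/* at most two frames are live at once */

		prev = frame;

		if (!frame || !frame->return_addr)
			break;

		return_addr = frame->return_addr;
		report(return_addr);
	}
}

Keeping at most two frames live at once is what the DWARF_FRAME_MIN_REQ reserve added earlier in this patch is sized for.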
@@ -801,6 +869,9 @@ static void dwarf_unwinder_cleanup(void)
 
 	list_for_each_entry(fde, &dwarf_fde_list, link)
 		kfree(fde);
+
+	kmem_cache_destroy(dwarf_reg_cachep);
+	kmem_cache_destroy(dwarf_frame_cachep);
 }
 
 /**
@@ -827,6 +898,21 @@ static int __init dwarf_unwinder_init(void)
 	f_entries = 0;
 	entry = &__start_eh_frame;
 
+	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
+			sizeof(struct dwarf_frame), 0, SLAB_PANIC, NULL);
+	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
+			sizeof(struct dwarf_reg), 0, SLAB_PANIC, NULL);
+
+	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
+					  mempool_alloc_slab,
+					  mempool_free_slab,
+					  dwarf_frame_cachep);
+
+	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
+					mempool_alloc_slab,
+					mempool_free_slab,
+					dwarf_reg_cachep);
+
 	while ((char *)entry < __stop_eh_frame) {
 		p = entry;
 
@@ -856,7 +942,7 @@ static int __init dwarf_unwinder_init(void)
 			else
 				c_entries++;
 		} else {
-			err = dwarf_parse_fde(entry, entry_type, p, len, end);
 			if (err < 0)
 				goto out;
 			else