Diffstat (limited to 'arch/sh/kernel/dwarf.c')
 -rw-r--r--  arch/sh/kernel/dwarf.c  201
 1 file changed, 136 insertions(+), 65 deletions(-)
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index d0652153f576..e4810375207d 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/list.h>
+#include <linux/mempool.h>
 #include <linux/mm.h>
 #include <asm/dwarf.h>
 #include <asm/unwinder.h>
@@ -25,6 +26,17 @@
 #include <asm/dwarf.h>
 #include <asm/stacktrace.h>
 
+/* Reserve enough memory for two stack frames */
+#define DWARF_FRAME_MIN_REQ	2
+/* ... with 4 registers per frame. */
+#define DWARF_REG_MIN_REQ	(DWARF_FRAME_MIN_REQ * 4)
+
+static struct kmem_cache *dwarf_frame_cachep;
+static mempool_t *dwarf_frame_pool;
+
+static struct kmem_cache *dwarf_reg_cachep;
+static mempool_t *dwarf_reg_pool;
+
 static LIST_HEAD(dwarf_cie_list);
 static DEFINE_SPINLOCK(dwarf_cie_lock);
 
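The caches and pools declared above back every allocation the unwinder makes from here on; the real setup happens in dwarf_unwinder_init() at the end of this patch. As a minimal sketch of the pattern only (the "demo_*" names and the 64-byte object size are made up, not part of the patch), a mempool layered on a slab cache keeps a reserve of preallocated objects that mempool_alloc(..., GFP_ATOMIC) can fall back on when a normal slab allocation fails:

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/slab.h>

/* Hypothetical names for illustration only. */
static struct kmem_cache *demo_cachep;
static mempool_t *demo_pool;

static int __init demo_pool_init(void)
{
	demo_cachep = kmem_cache_create("demo_objs", 64, 0, SLAB_PANIC, NULL);

	/* Keep at least two objects preallocated at all times. */
	demo_pool = mempool_create(2, mempool_alloc_slab,
				   mempool_free_slab, demo_cachep);
	return demo_pool ? 0 : -ENOMEM;
}

static void demo_pool_use(void)
{
	/* May dip into the reserve when the slab cannot be refilled ... */
	void *obj = mempool_alloc(demo_pool, GFP_ATOMIC);

	/* ... and freeing tops the reserve back up before hitting the slab. */
	mempool_free(obj, demo_pool);
}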
@@ -33,33 +45,25 @@ static DEFINE_SPINLOCK(dwarf_fde_lock);
 
 static struct dwarf_cie *cached_cie;
 
-/*
- * Figure out whether we need to allocate some dwarf registers. If dwarf
- * registers have already been allocated then we may need to realloc
- * them. "reg" is a register number that we need to be able to access
- * after this call.
- *
- * Register numbers start at zero, therefore we need to allocate space
- * for "reg" + 1 registers.
- */
-static void dwarf_frame_alloc_regs(struct dwarf_frame *frame,
-				   unsigned int reg)
-{
-	struct dwarf_reg *regs;
-	unsigned int num_regs = reg + 1;
-	size_t new_size;
-	size_t old_size;
-
-	new_size = num_regs * sizeof(*regs);
-	old_size = frame->num_regs * sizeof(*regs);
-
-	/* Fast path: don't allocate any regs if we've already got enough. */
-	if (frame->num_regs >= num_regs)
-		return;
-
-	regs = kzalloc(new_size, GFP_ATOMIC);
-	if (!regs) {
-		printk(KERN_WARNING "Unable to allocate DWARF registers\n");
+/**
+ * dwarf_frame_alloc_reg - allocate memory for a DWARF register
+ * @frame: the DWARF frame whose list of registers we insert on
+ * @reg_num: the register number
+ *
+ * Allocate space for, and initialise, a dwarf reg from
+ * dwarf_reg_pool and insert it onto the (unsorted) linked-list of
+ * dwarf registers for @frame.
+ *
+ * Return the initialised DWARF reg.
+ */
+static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
+					       unsigned int reg_num)
+{
+	struct dwarf_reg *reg;
+
+	reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
+	if (!reg) {
+		printk(KERN_WARNING "Unable to allocate a DWARF register\n");
 		/*
 		 * Let's just bomb hard here, we have no way to
 		 * gracefully recover.
@@ -67,13 +71,44 @@ static void dwarf_frame_alloc_regs(struct dwarf_frame *frame,
 		BUG();
 	}
 
-	if (frame->regs) {
-		memcpy(regs, frame->regs, old_size);
-		kfree(frame->regs);
+	reg->number = reg_num;
+	reg->addr = 0;
+	reg->flags = 0;
+
+	list_add(&reg->link, &frame->reg_list);
+
+	return reg;
+}
+
+static void dwarf_frame_free_regs(struct dwarf_frame *frame)
+{
+	struct dwarf_reg *reg, *n;
+
+	list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
+		list_del(&reg->link);
+		mempool_free(reg, dwarf_reg_pool);
+	}
+}
+
+/**
+ * dwarf_frame_reg - return a DWARF register
+ * @frame: the DWARF frame to search in for @reg_num
+ * @reg_num: the register number to search for
+ *
+ * Lookup and return the dwarf reg @reg_num for this frame. Return
+ * NULL if @reg_num is an invalid register number.
+ */
+static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
+					 unsigned int reg_num)
+{
+	struct dwarf_reg *reg;
+
+	list_for_each_entry(reg, &frame->reg_list, link) {
+		if (reg->number == reg_num)
+			return reg;
 	}
 
-	frame->regs = regs;
-	frame->num_regs = num_regs;
+	return NULL;
 }
 
 /**
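For readers unfamiliar with the list_head idiom used above: the per-frame register store is now just an unsorted linked list with linear lookup. The userspace sketch below is illustrative only (the reg/frame types and function names are hypothetical, not kernel code); it mirrors the insert-then-scan behaviour of dwarf_frame_alloc_reg() and dwarf_frame_reg() without <linux/list.h>:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for struct dwarf_reg / struct dwarf_frame. */
struct reg {
	unsigned int number;
	unsigned long addr;
	struct reg *next;
};

struct frame {
	struct reg *reg_list;	/* head of an unsorted list */
};

/* Allocate a register record and push it on the front of the frame's list. */
static struct reg *frame_alloc_reg(struct frame *f, unsigned int num)
{
	struct reg *r = calloc(1, sizeof(*r));

	if (!r)
		abort();	/* the kernel version BUG()s here, too */

	r->number = num;
	r->next = f->reg_list;
	f->reg_list = r;
	return r;
}

/* Linear scan; returns NULL for a register that was never allocated. */
static struct reg *frame_reg(struct frame *f, unsigned int num)
{
	struct reg *r;

	for (r = f->reg_list; r; r = r->next)
		if (r->number == num)
			return r;
	return NULL;
}

int main(void)
{
	struct frame f = { 0 };

	frame_alloc_reg(&f, 14)->addr = 4;	/* record a CFA-relative slot */
	printf("%lu\n", frame_reg(&f, 14)->addr);
	return 0;
}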
@@ -347,6 +382,7 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 	unsigned char insn;
 	unsigned char *current_insn;
 	unsigned int count, delta, reg, expr_len, offset;
+	struct dwarf_reg *regp;
 
 	current_insn = insn_start;
 
@@ -369,9 +405,9 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 			count = dwarf_read_uleb128(current_insn, &offset);
 			current_insn += count;
 			offset *= cie->data_alignment_factor;
-			dwarf_frame_alloc_regs(frame, reg);
-			frame->regs[reg].addr = offset;
-			frame->regs[reg].flags |= DWARF_REG_OFFSET;
+			regp = dwarf_frame_alloc_reg(frame, reg);
+			regp->addr = offset;
+			regp->flags |= DWARF_REG_OFFSET;
 			continue;
 			/* NOTREACHED */
 		case DW_CFA_restore:
@@ -453,17 +489,18 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 			count = dwarf_read_leb128(current_insn, &offset);
 			current_insn += count;
 			offset *= cie->data_alignment_factor;
-			dwarf_frame_alloc_regs(frame, reg);
-			frame->regs[reg].flags |= DWARF_REG_OFFSET;
-			frame->regs[reg].addr = offset;
+			regp = dwarf_frame_alloc_reg(frame, reg);
+			regp->flags |= DWARF_REG_OFFSET;
+			regp->addr = offset;
 			break;
 		case DW_CFA_val_offset:
 			count = dwarf_read_uleb128(current_insn, &reg);
 			current_insn += count;
 			count = dwarf_read_leb128(current_insn, &offset);
 			offset *= cie->data_alignment_factor;
-			frame->regs[reg].flags |= DWARF_REG_OFFSET;
-			frame->regs[reg].addr = offset;
+			regp = dwarf_frame_alloc_reg(frame, reg);
+			regp->flags |= DWARF_REG_OFFSET;
+			regp->addr = offset;
 			break;
 		case DW_CFA_GNU_args_size:
 			count = dwarf_read_uleb128(current_insn, &offset);
@@ -474,9 +511,10 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 			current_insn += count;
 			count = dwarf_read_uleb128(current_insn, &offset);
 			offset *= cie->data_alignment_factor;
-			dwarf_frame_alloc_regs(frame, reg);
-			frame->regs[reg].flags |= DWARF_REG_OFFSET;
-			frame->regs[reg].addr = -offset;
+
+			regp = dwarf_frame_alloc_reg(frame, reg);
+			regp->flags |= DWARF_REG_OFFSET;
+			regp->addr = -offset;
 			break;
 		default:
 			pr_debug("unhandled DWARF instruction 0x%x\n", insn);
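To make the offset arithmetic in these cases concrete: each DW_CFA_offset-style rule stores a CFA-relative slot in regp->addr. Assuming, purely for illustration, a CIE data_alignment_factor of -4 (a common value for a 32-bit target; the real factor is read from the CIE), a rule carrying the ULEB128 operand 2 yields regp->addr = 2 * -4 = -8, so the saved value will later be reloaded from frame->cfa - 8, exactly as the return-address handling in dwarf_unwind_stack() below does with __raw_readl().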
@@ -502,8 +540,8 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
 	struct dwarf_frame *frame;
 	struct dwarf_cie *cie;
 	struct dwarf_fde *fde;
+	struct dwarf_reg *reg;
 	unsigned long addr;
-	int i, offset;
 
 	/*
 	 * If this is the first invocation of this recursive function we
@@ -516,11 +554,16 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
 	if (!pc && !prev)
 		pc = (unsigned long)current_text_addr();
 
-	frame = kzalloc(sizeof(*frame), GFP_ATOMIC);
-	if (!frame)
-		return NULL;
+	frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
+	if (!frame) {
+		printk(KERN_ERR "Unable to allocate a dwarf frame\n");
+		BUG();
+	}
 
+	INIT_LIST_HEAD(&frame->reg_list);
+	frame->flags = 0;
 	frame->prev = prev;
+	frame->return_addr = 0;
 
 	fde = dwarf_lookup_fde(pc);
 	if (!fde) {
@@ -540,7 +583,7 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
 		 * case above, which sucks because we could print a
 		 * warning here.
 		 */
-		return NULL;
+		goto bail;
 	}
 
 	cie = dwarf_lookup_cie(fde->cie_pointer);
@@ -560,10 +603,10 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
 	switch (frame->flags) {
 	case DWARF_FRAME_CFA_REG_OFFSET:
 		if (prev) {
-			BUG_ON(!prev->regs[frame->cfa_register].flags);
+			reg = dwarf_frame_reg(prev, frame->cfa_register);
+			BUG_ON(!reg);
 
-			addr = prev->cfa;
-			addr += prev->regs[frame->cfa_register].addr;
+			addr = prev->cfa + reg->addr;
 			frame->cfa = __raw_readl(addr);
 
 		} else {
@@ -584,23 +627,18 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
 	}
 
 	/* If we haven't seen the return address reg, we're screwed. */
-	BUG_ON(!frame->regs[DWARF_ARCH_RA_REG].flags);
-
-	for (i = 0; i <= frame->num_regs; i++) {
-		struct dwarf_reg *reg = &frame->regs[i];
-
-		if (!reg->flags)
-			continue;
-
-		offset = reg->addr;
-		offset += frame->cfa;
-	}
-
-	addr = frame->cfa + frame->regs[DWARF_ARCH_RA_REG].addr;
+	reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);
+	BUG_ON(!reg);
+
+	addr = frame->cfa + reg->addr;
 	frame->return_addr = __raw_readl(addr);
 
-	frame->next = dwarf_unwind_stack(frame->return_addr, frame);
 	return frame;
+
+bail:
+	dwarf_frame_free_regs(frame);
+	mempool_free(frame, dwarf_frame_pool);
+	return NULL;
 }
 
 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
@@ -770,14 +808,29 @@ static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs,
 				 unsigned long *sp,
 				 const struct stacktrace_ops *ops, void *data)
 {
-	struct dwarf_frame *frame;
+	struct dwarf_frame *frame, *_frame;
+	unsigned long return_addr;
+
+	_frame = NULL;
+	return_addr = 0;
 
-	frame = dwarf_unwind_stack(0, NULL);
+	while (1) {
+		frame = dwarf_unwind_stack(return_addr, _frame);
+
+		if (_frame) {
+			dwarf_frame_free_regs(_frame);
+			mempool_free(_frame, dwarf_frame_pool);
+		}
+
+		_frame = frame;
+
+		if (!frame || !frame->return_addr)
+			break;
 
-	while (frame && frame->return_addr) {
-		ops->address(data, frame->return_addr, 1);
-		frame = frame->next;
+		return_addr = frame->return_addr;
+		ops->address(data, return_addr, 1);
 	}
+
 }
 
 static struct unwinder dwarf_unwinder = {
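Note the ordering in the loop above: the previously unwound frame (_frame) is passed to dwarf_unwind_stack() and only freed after that call returns, because computing the new frame's CFA dereferences the previous frame's register list via dwarf_frame_reg(prev, frame->cfa_register). Freeing each consumed frame per iteration replaces the old frame->next chain, which, in the code shown here, was never freed once the dump loop finished.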
@@ -801,6 +854,9 @@ static void dwarf_unwinder_cleanup(void)
 
 	list_for_each_entry(fde, &dwarf_fde_list, link)
 		kfree(fde);
+
+	kmem_cache_destroy(dwarf_reg_cachep);
+	kmem_cache_destroy(dwarf_frame_cachep);
 }
 
 /**
@@ -827,6 +883,21 @@ static int __init dwarf_unwinder_init(void)
 	f_entries = 0;
 	entry = &__start_eh_frame;
 
+	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
+			sizeof(struct dwarf_frame), 0, SLAB_PANIC, NULL);
+	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
+			sizeof(struct dwarf_reg), 0, SLAB_PANIC, NULL);
+
+	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
+					  mempool_alloc_slab,
+					  mempool_free_slab,
+					  dwarf_frame_cachep);
+
+	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
+					mempool_alloc_slab,
+					mempool_free_slab,
+					dwarf_reg_cachep);
+
 	while ((char *)entry < __stop_eh_frame) {
 		p = entry;
 