author | Phil Carmody <ext-phil.2.carmody@nokia.com> | 2010-09-14 16:35:39 -0400
---|---|---
committer | Tony Luck <tony.luck@intel.com> | 2010-09-14 16:35:39 -0400
commit | 04a344069052d94b4ea1f95d930cbfa39b4ca292 (patch) |
tree | 1caa4caa3d023435819c6dc5ed6f90701a8c97e1 /arch/ia64/kernel/unwind.c |
parent | 747584be04bb98a856bab5cd1bfe56d341881b83 (diff) |
[IA64] unwind - optimise linked-list searches for modules
It's clear from the comment in the code about keeping the
kernel's unwind table at the front of the list that some
attention has been paid to access patterns. Tests on other
architectures have shown that a move-to-front optimisation
improves searches dramatically.
Signed-off-by: Phil Carmody <ext-phil.2.carmody@nokia.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
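The change amounts to a move-to-front style heuristic on a singly linked list, with the twist that the head (the kernel's own unwind table) stays pinned and the matched table is promoted to second position instead. A minimal standalone sketch of just that list manipulation, using made-up types and names rather than the kernel's struct unw_table, could look like this:

```c
/*
 * Illustrative sketch only: 'struct tbl' and 'find_table' are hypothetical,
 * not the kernel's types.  The real code does this under unw.lock.
 */
#include <stdio.h>
#include <stddef.h>

struct tbl {
	unsigned long start, end;
	struct tbl *next;
};

/*
 * Return the table covering ip.  If the match is neither the head nor
 * already in second position, unlink it and splice it in right after the
 * head, so the next search for the same module finds it after one hop.
 */
static struct tbl *find_table(struct tbl *head, unsigned long ip)
{
	struct tbl *t, *prev = NULL;

	for (t = head; t; t = t->next) {
		if (ip >= t->start && ip < t->end) {
			if (prev && prev != head) {
				prev->next = t->next;      /* unlink t */
				t->next = head->next;      /* splice after head */
				head->next = t;
			}
			return t;
		}
		prev = t;
	}
	return NULL;
}

int main(void)
{
	struct tbl c = { 300, 400, NULL };
	struct tbl b = { 200, 300, &c };
	struct tbl a = { 100, 200, &b };	/* 'a' plays the kernel table */

	find_table(&a, 350);			/* matches c, deep in the list */

	/* Order is now a -> c -> b: the head stays put, the hot table is second. */
	for (struct tbl *t = &a; t; t = t->next)
		printf("[%lu, %lu)\n", t->start, t->end);
	return 0;
}
```

Repeated backtraces tend to walk the same few modules, so promoting the last-matched table keeps the common case to at most two hops while preserving whatever relies on the kernel table being first.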
Diffstat (limited to 'arch/ia64/kernel/unwind.c')
-rw-r--r-- | arch/ia64/kernel/unwind.c | 17 |
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index f47217b1bb37..fed6afa2e8a9 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -1531,7 +1531,7 @@ build_script (struct unw_frame_info *info)
 	struct unw_labeled_state *ls, *next;
 	unsigned long ip = info->ip;
 	struct unw_state_record sr;
-	struct unw_table *table;
+	struct unw_table *table, *prev;
 	struct unw_reg_info *r;
 	struct unw_insn insn;
 	u8 *dp, *desc_end;
@@ -1560,11 +1560,26 @@ build_script (struct unw_frame_info *info)
 
 	STAT(parse_start = ia64_get_itc());
 
+	prev = NULL;
 	for (table = unw.tables; table; table = table->next) {
 		if (ip >= table->start && ip < table->end) {
+			/*
+			 * Leave the kernel unwind table at the very front,
+			 * lest moving it breaks some assumption elsewhere.
+			 * Otherwise, move the matching table to the second
+			 * position in the list so that traversals can benefit
+			 * from commonality in backtrace paths.
+			 */
+			if (prev && prev != unw.tables) {
+				/* unw is safe - we're already spinlocked */
+				prev->next = table->next;
+				table->next = unw.tables->next;
+				unw.tables->next = table;
+			}
 			e = lookup(table, ip - table->segment_base);
 			break;
 		}
+		prev = table;
 	}
 	if (!e) {
 		/* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */