author     Yong Zhang <yong.zhang@windriver.com>    2010-05-04 02:16:48 -0400
committer  Ingo Molnar <mingo@elte.hu>              2010-05-07 05:27:26 -0400
commit     4726f2a617ebd868a4fdeb5679613b897e5f1676
tree       c9eea44c66f98123802d99aad5b3cce93626eda8  /kernel/lockdep.c
parent     54d47a2be5e7f928fb77b2f5a0761f6bd3c9dbff
lockdep: Reduce stack_trace usage
When check_prevs_add() is called and all validations pass,
add_lock_to_list() adds the new lock to the dependency tree and
allocates a stack_trace for each list_entry.
But at that point we are always on the same stack, so the stack_trace
for each list_entry has the same value. This is redundant and eats
up a lot of memory, which can exhaust a low MAX_STACK_TRACE_ENTRIES
and trigger the warning.
Use one copy of the stack_trace instead.
V2: As suggested by Peter Zijlstra, move save_trace() from
check_prevs_add() to check_prev_add().
Also track trylock dependencies, whose stack traces are redundant as well.
Signed-off-by: Yong Zhang <yong.zhang0@windriver.com>
Cc: David S. Miller <davem@davemloft.net>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100504065711.GC10784@windriver.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
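To make the memory saving concrete, here is a toy, self-contained C sketch of the pattern the patch moves to; it is not the kernel code, and all names (buffer, toy_save_trace, trace_desc, dep_entry) are invented for illustration. Saved traces consume slots from one fixed global buffer (as lockdep's stack_trace_entries does, capped by MAX_STACK_TRACE_ENTRIES), so capturing the trace once and copying the small descriptor into every dependency entry avoids burning extra slots per entry:

#include <stdio.h>

#define MAX_ENTRIES 16                  /* stand-in for MAX_STACK_TRACE_ENTRIES */

static unsigned long buffer[MAX_ENTRIES];
static unsigned int nr_used;

struct trace_desc { unsigned long *entries; unsigned int nr; };
struct dep_entry  { struct trace_desc trace; };

/* Stand-in for save_trace(): consumes slots from the shared buffer. */
static int toy_save_trace(struct trace_desc *t, unsigned int depth)
{
        if (nr_used + depth > MAX_ENTRIES)
                return 0;               /* the "low MAX_STACK_TRACE_ENTRIES" failure case */
        t->entries = &buffer[nr_used];
        t->nr = depth;
        nr_used += depth;
        return 1;
}

int main(void)
{
        struct trace_desc shared;
        struct dep_entry forward, backward;

        /* Capture the current stack once per call chain ... */
        if (!toy_save_trace(&shared, 4))
                return 1;

        /* ... and let both dependency-list entries share that one copy,
         * instead of each entry calling toy_save_trace() again. */
        forward.trace = shared;
        backward.trace = shared;

        printf("forward/backward share %u entries, %u of %d slots used\n",
               forward.trace.nr, nr_used, MAX_ENTRIES);
        return 0;
}

With the old scheme, each of the two entries would have consumed its own slots; with the shared copy, only one capture is charged against the buffer.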
Diffstat (limited to 'kernel/lockdep.c')
 kernel/lockdep.c | 29 +++++++++++++++++++++++--------
 1 file changed, 21 insertions(+), 8 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 9cf79858fd82..51080807dc8c 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -805,7 +805,8 @@ static struct lock_list *alloc_list_entry(void)
  * Add a new dependency to the head of the list:
  */
 static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
-			struct list_head *head, unsigned long ip, int distance)
+			struct list_head *head, unsigned long ip,
+			int distance, struct stack_trace *trace)
 {
 	struct lock_list *entry;
 	/*
@@ -816,11 +817,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	if (!entry)
 		return 0;
 
-	if (!save_trace(&entry->trace))
-		return 0;
-
 	entry->class = this;
 	entry->distance = distance;
+	entry->trace = *trace;
 	/*
 	 * Since we never remove from the dependency list, the list can
 	 * be walked lockless by other CPUs, it's only allocation
@@ -1622,12 +1621,20 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance)
+	       struct held_lock *next, int distance, int trylock_loop)
 {
 	struct lock_list *entry;
 	int ret;
 	struct lock_list this;
 	struct lock_list *uninitialized_var(target_entry);
+	/*
+	 * Static variable, serialized by the graph_lock().
+	 *
+	 * We use this static variable to save the stack trace in case
+	 * we call into this function multiple times due to encountering
+	 * trylocks in the held lock stack.
+	 */
+	static struct stack_trace trace;
 
 	/*
 	 * Prove that the new <prev> -> <next> dependency would not
@@ -1675,20 +1682,23 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		}
 	}
 
+	if (!trylock_loop && !save_trace(&trace))
+		return 0;
+
 	/*
 	 * Ok, all validations passed, add the new lock
 	 * to the previous lock's dependency list:
 	 */
 	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
 			       &hlock_class(prev)->locks_after,
-			       next->acquire_ip, distance);
+			       next->acquire_ip, distance, &trace);
 
 	if (!ret)
 		return 0;
 
 	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
 			       &hlock_class(next)->locks_before,
-			       next->acquire_ip, distance);
+			       next->acquire_ip, distance, &trace);
 	if (!ret)
 		return 0;
 
@@ -1718,6 +1728,7 @@ static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
 	int depth = curr->lockdep_depth;
+	int trylock_loop = 0;
 	struct held_lock *hlock;
 
 	/*
@@ -1743,7 +1754,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		 * added:
 		 */
 		if (hlock->read != 2) {
-			if (!check_prev_add(curr, hlock, next, distance))
+			if (!check_prev_add(curr, hlock, next,
+						distance, trylock_loop))
 				return 0;
 			/*
 			 * Stop after the first non-trylock entry,
@@ -1766,6 +1778,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		if (curr->held_locks[depth].irq_context !=
 		    curr->held_locks[depth-1].irq_context)
 			break;
+		trylock_loop = 1;
 	}
 	return 1;
 out_bug:
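For reference, the trylock_loop flag introduced above can be illustrated with a simplified, self-contained C sketch of the held-lock walk in check_prevs_add(); it is not the kernel loop, omits the read-lock handling, and the names (held, toy_check_prev_add) are invented. The trace is captured only on the first iteration, and later iterations, which only happen for trylock entries, reuse it:

#include <stdbool.h>
#include <stdio.h>

struct held { bool trylock; };

/* Stand-in for check_prev_add(): capture the trace only when this is
 * the first iteration of the walk (trylock_loop == 0). */
static int toy_check_prev_add(const struct held *prev, int trylock_loop)
{
        if (!trylock_loop)
                printf("save_trace() called once\n");
        printf("add dependency (prev %s a trylock)\n",
               prev->trylock ? "is" : "is not");
        return 1;
}

int main(void)
{
        /* Bottom of the stack first: a normal lock followed by two trylocks. */
        struct held stack[] = {
                { .trylock = false }, { .trylock = true }, { .trylock = true },
        };
        int depth = 3;
        int trylock_loop = 0;

        /* Walk from the most recently held lock downwards, as
         * check_prevs_add() does. */
        while (depth-- > 0) {
                if (!toy_check_prev_add(&stack[depth], trylock_loop))
                        return 1;
                /* Stop after the first non-trylock entry. */
                if (!stack[depth].trylock)
                        break;
                trylock_loop = 1;
        }
        return 0;
}

All three dependencies added from this one stack share the single saved trace, which is exactly why re-running save_trace() on each trylock iteration would be redundant.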