author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-06-15 09:05:12 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-06-15 09:57:49 -0400
commit		75f937f24bd9c003dcb9d7d5509f23459f1f6000 (patch)
tree		859d5ebe9b47b952d13873d7a2a580012682aeb0 /kernel/perf_counter.c
parent		613d8602292165f86ba1969784fea01a06d55900 (diff)
perf_counter: Fix ctx->mutex vs counter->mutex inversion
Simon triggered a lockdep inversion report about us taking ctx->mutex vs
counter->mutex in inverse orders. Fix that up.

Reported-by: Simon Holm Thøgersen <odie@cs.aau.dk>
Tested-by: Simon Holm Thøgersen <odie@cs.aau.dk>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
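The inversion lockdep reported is the classic AB-BA pattern: the old perf_counter_for_each() took counter->child_mutex and then, through the removed perf_counter_for_each_sibling() helper, ctx->mutex, while another path acquires those two locks in the opposite order. A minimal userspace sketch of that kind of inversion, with pthread mutexes standing in for the two kernel locks (the program and all names in it are illustrative placeholders, not kernel code):

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for ctx->mutex and counter->child_mutex (placeholder names). */
static pthread_mutex_t ctx_mutex   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t child_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Path A: child_mutex first, then ctx_mutex (the order the removed helper produced). */
static void *path_a(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&child_mutex);
	pthread_mutex_lock(&ctx_mutex);
	pthread_mutex_unlock(&ctx_mutex);
	pthread_mutex_unlock(&child_mutex);
	return NULL;
}

/* Path B: the inverse order -- ctx_mutex first, then child_mutex. */
static void *path_b(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&ctx_mutex);
	pthread_mutex_lock(&child_mutex);
	pthread_mutex_unlock(&child_mutex);
	pthread_mutex_unlock(&ctx_mutex);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	/*
	 * If A holds child_mutex while B holds ctx_mutex, both block forever.
	 * Lockdep flags this AB-BA possibility even on runs that never deadlock.
	 */
	pthread_create(&a, NULL, path_a, NULL);
	pthread_create(&b, NULL, path_b, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("finished without hitting the bad interleaving this time");
	return 0;
}

The diff below removes the helper responsible for the child_mutex-then-ctx->mutex ordering and reworks perf_counter_for_each() to take ctx->mutex first, descending into perf_counter_for_each_child() only afterwards, so both users of the lock pair agree on a single order.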
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	34
1 file changed, 11 insertions(+), 23 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index e914daff03b5..109a95723859 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1620,22 +1620,6 @@ static void perf_counter_reset(struct perf_counter *counter)
 	perf_counter_update_userpage(counter);
 }
 
-static void perf_counter_for_each_sibling(struct perf_counter *counter,
-					  void (*func)(struct perf_counter *))
-{
-	struct perf_counter_context *ctx = counter->ctx;
-	struct perf_counter *sibling;
-
-	WARN_ON_ONCE(ctx->parent_ctx);
-	mutex_lock(&ctx->mutex);
-	counter = counter->group_leader;
-
-	func(counter);
-	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
-		func(sibling);
-	mutex_unlock(&ctx->mutex);
-}
-
 /*
  * Holding the top-level counter's child_mutex means that any
  * descendant process that has inherited this counter will block
@@ -1658,14 +1642,18 @@ static void perf_counter_for_each_child(struct perf_counter *counter,
 static void perf_counter_for_each(struct perf_counter *counter,
 				  void (*func)(struct perf_counter *))
 {
-	struct perf_counter *child;
+	struct perf_counter_context *ctx = counter->ctx;
+	struct perf_counter *sibling;
 
-	WARN_ON_ONCE(counter->ctx->parent_ctx);
-	mutex_lock(&counter->child_mutex);
-	perf_counter_for_each_sibling(counter, func);
-	list_for_each_entry(child, &counter->child_list, child_list)
-		perf_counter_for_each_sibling(child, func);
-	mutex_unlock(&counter->child_mutex);
+	WARN_ON_ONCE(ctx->parent_ctx);
+	mutex_lock(&ctx->mutex);
+	counter = counter->group_leader;
+
+	perf_counter_for_each_child(counter, func);
+	func(counter);
+	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
+		perf_counter_for_each_child(counter, func);
+	mutex_unlock(&ctx->mutex);
 }
 
 static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)