author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-03-13 07:21:36 -0400
committer Ingo Molnar <mingo@elte.hu>              2009-04-06 03:29:43 -0400
commit    592903cdcbf606a838056bae6d03fc557806c914 (patch)
tree      6851004446a405654ff3f1c39a70b313456544a5 /kernel/perf_counter.c
parent    d6d020e9957745c61285ef3da9f294c5e6801f0f (diff)
perf_counter: add an event_list
I noticed that the counter_list only includes top-level counters, thus
perf_swcounter_event() will miss sw-counters in groups.

Since perf_swcounter_event() also wants an RCU safe list, create a new
event_list that includes all counters, uses the RCU list ops, and use
call_rcu to free the counter structure.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
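The pattern the patch applies is the standard RCU-protected list: writers
still serialize against each other with a lock, readers traverse the list
locklessly under rcu_read_lock(), and freeing is deferred past a grace
period with call_rcu(). Below is a minimal, self-contained sketch of that
idiom in kernel-style C; the names (struct item, item_list, item_lock,
item_sum) are invented for illustration and are not part of this patch.

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	int			value;
	struct list_head	entry;		/* linked into item_list */
	struct rcu_head		rcu_head;	/* for the deferred kfree() */
};

static LIST_HEAD(item_list);
static DEFINE_SPINLOCK(item_lock);		/* serializes writers only */

/* Writers still exclude each other; RCU only removes the locking
 * requirement on the read side. */
static void item_add(struct item *item)
{
	spin_lock(&item_lock);
	list_add_rcu(&item->entry, &item_list);
	spin_unlock(&item_lock);
}

static void item_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct item, rcu_head));
}

static void item_del(struct item *item)
{
	spin_lock(&item_lock);
	list_del_rcu(&item->entry);
	spin_unlock(&item_lock);

	/* Defer the kfree() until all readers that might still hold a
	 * pointer to this item have left rcu_read_lock()..unlock(). */
	call_rcu(&item->rcu_head, item_free_rcu);
}

/* Lockless reader. */
static int item_sum(void)
{
	struct item *item;
	int sum = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(item, &item_list, entry)
		sum += item->value;
	rcu_read_unlock();

	return sum;
}

Note that list_del_rcu() keeps the removed node's ->next pointer valid, so
a reader already standing on that node can finish its traversal; the memory
itself is only recycled after the grace period has elapsed.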
Diffstat (limited to 'kernel/perf_counter.c')
 kernel/perf_counter.c | 30 +++++++++++++++++++-----------
 1 file changed, 19 insertions(+), 11 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index f9330d5827cf..8d6ecfa64c0e 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -22,6 +22,7 @@
 #include <linux/perf_counter.h>
 #include <linux/mm.h>
 #include <linux/vmstat.h>
+#include <linux/rculist.h>
 
 /*
  * Each CPU has a list of per CPU counters:
@@ -72,6 +73,8 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 		list_add_tail(&counter->list_entry, &ctx->counter_list);
 	else
 		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
+
+	list_add_rcu(&counter->event_entry, &ctx->event_list);
 }
 
 static void
@@ -80,6 +83,7 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 	struct perf_counter *sibling, *tmp;
 
 	list_del_init(&counter->list_entry);
+	list_del_rcu(&counter->event_entry);
 
 	/*
 	 * If this was a group counter with sibling counters then
@@ -1133,6 +1137,14 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 	return ctx;
 }
 
+static void free_counter_rcu(struct rcu_head *head)
+{
+	struct perf_counter *counter;
+
+	counter = container_of(head, struct perf_counter, rcu_head);
+	kfree(counter);
+}
+
 /*
  * Called when the last reference to the file is gone.
  */
@@ -1151,7 +1163,7 @@ static int perf_release(struct inode *inode, struct file *file)
 	mutex_unlock(&counter->mutex);
 	mutex_unlock(&ctx->mutex);
 
-	kfree(counter);
+	call_rcu(&counter->rcu_head, free_counter_rcu);
 	put_context(ctx);
 
 	return 0;
@@ -1491,22 +1503,16 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
 			     int nmi, struct pt_regs *regs)
 {
 	struct perf_counter *counter;
-	unsigned long flags;
 
-	if (list_empty(&ctx->counter_list))
+	if (list_empty(&ctx->event_list))
 		return;
 
-	spin_lock_irqsave(&ctx->lock, flags);
-
-	/*
-	 * XXX: make counter_list RCU safe
-	 */
-	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
 		if (perf_swcounter_match(counter, event, regs))
 			perf_swcounter_add(counter, nr, nmi, regs);
 	}
-
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	rcu_read_unlock();
 }
 
 void perf_swcounter_event(enum hw_event_types event, u64 nr,
@@ -1846,6 +1852,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 
 	mutex_init(&counter->mutex);
 	INIT_LIST_HEAD(&counter->list_entry);
+	INIT_LIST_HEAD(&counter->event_entry);
 	INIT_LIST_HEAD(&counter->sibling_list);
 	init_waitqueue_head(&counter->waitq);
 
@@ -1992,6 +1999,7 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
 	spin_lock_init(&ctx->lock);
 	mutex_init(&ctx->mutex);
 	INIT_LIST_HEAD(&ctx->counter_list);
+	INIT_LIST_HEAD(&ctx->event_list);
 	ctx->task = task;
 }
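Design note: the switch from kfree() to call_rcu() in perf_release() is
what makes the lockless traversal above safe. list_del_rcu() unlinks a
counter from event_list, but a concurrent perf_swcounter_ctx_event()
running inside rcu_read_lock()/rcu_read_unlock() may still hold a pointer
to it; call_rcu() defers the actual kfree() until every such reader has
left its read-side critical section. Dropping the spin_lock_irqsave() on
ctx->lock also matters here, presumably because software counter events
can fire from contexts (note the nmi argument) where taking a spinlock
would be problematic.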