 include/linux/perf_counter.h |    4
 kernel/perf_counter.c        |   30
 2 files changed, 23 insertions(+), 11 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index dfb4c7ce18b3..08c11a6afebc 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -187,6 +187,7 @@ struct file;
 struct perf_counter {
 #ifdef CONFIG_PERF_COUNTERS
 	struct list_head		list_entry;
+	struct list_head		event_entry;
 	struct list_head		sibling_list;
 	struct perf_counter		*group_leader;
 	const struct hw_perf_counter_ops *hw_ops;
@@ -220,6 +221,8 @@ struct perf_counter {
 	struct perf_data		*irqdata;
 	struct perf_data		*usrdata;
 	struct perf_data		data[2];
+
+	struct rcu_head			rcu_head;
 #endif
 };
 
@@ -243,6 +246,7 @@ struct perf_counter_context {
 	struct mutex		mutex;
 
 	struct list_head	counter_list;
+	struct list_head	event_list;
 	int			nr_counters;
 	int			nr_active;
 	int			is_active;
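
The two members added to perf_counter.h work as a pair with the rcu_head above: event_entry threads each counter onto its context's new event_list, which readers may walk without taking ctx->lock, while rcu_head defers the final kfree() until those readers are done. A rough sketch of the resulting layout (members simplified and elided, not meant to compile outside the kernel tree):

	struct perf_counter {
		struct list_head	list_entry;	/* on ctx->counter_list, under ctx->lock */
		struct list_head	event_entry;	/* on ctx->event_list, RCU-protected */
		struct rcu_head		rcu_head;	/* defers kfree() past a grace period */
		/* ... other members unchanged ... */
	};

	struct perf_counter_context {
		spinlock_t		lock;
		struct list_head	counter_list;	/* walked with ctx->lock held */
		struct list_head	event_list;	/* walked under rcu_read_lock() */
		/* ... */
	};
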
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index f9330d5827cf..8d6ecfa64c0e 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -22,6 +22,7 @@
 #include <linux/perf_counter.h>
 #include <linux/mm.h>
 #include <linux/vmstat.h>
+#include <linux/rculist.h>
 
 /*
  * Each CPU has a list of per CPU counters:
@@ -72,6 +73,8 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 		list_add_tail(&counter->list_entry, &ctx->counter_list);
 	else
 		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
+
+	list_add_rcu(&counter->event_entry, &ctx->event_list);
 }
 
 static void
@@ -80,6 +83,7 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 	struct perf_counter *sibling, *tmp;
 
 	list_del_init(&counter->list_entry);
+	list_del_rcu(&counter->event_entry);
 
 	/*
 	 * If this was a group counter with sibling counters then
@@ -1133,6 +1137,14 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 	return ctx;
 }
 
+static void free_counter_rcu(struct rcu_head *head)
+{
+	struct perf_counter *counter;
+
+	counter = container_of(head, struct perf_counter, rcu_head);
+	kfree(counter);
+}
+
 /*
  * Called when the last reference to the file is gone.
  */
@@ -1151,7 +1163,7 @@ static int perf_release(struct inode *inode, struct file *file)
 	mutex_unlock(&counter->mutex);
 	mutex_unlock(&ctx->mutex);
 
-	kfree(counter);
+	call_rcu(&counter->rcu_head, free_counter_rcu);
 	put_context(ctx);
 
 	return 0;
@@ -1491,22 +1503,16 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
 			    int nmi, struct pt_regs *regs)
 {
 	struct perf_counter *counter;
-	unsigned long flags;
 
-	if (list_empty(&ctx->counter_list))
+	if (list_empty(&ctx->event_list))
 		return;
 
-	spin_lock_irqsave(&ctx->lock, flags);
-
-	/*
-	 * XXX: make counter_list RCU safe
-	 */
-	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
 		if (perf_swcounter_match(counter, event, regs))
 			perf_swcounter_add(counter, nr, nmi, regs);
 	}
-
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	rcu_read_unlock();
 }
 
 void perf_swcounter_event(enum hw_event_types event, u64 nr,
@@ -1846,6 +1852,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 
 	mutex_init(&counter->mutex);
 	INIT_LIST_HEAD(&counter->list_entry);
+	INIT_LIST_HEAD(&counter->event_entry);
 	INIT_LIST_HEAD(&counter->sibling_list);
 	init_waitqueue_head(&counter->waitq);
 
@@ -1992,6 +1999,7 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
 	spin_lock_init(&ctx->lock);
 	mutex_init(&ctx->mutex);
 	INIT_LIST_HEAD(&ctx->counter_list);
+	INIT_LIST_HEAD(&ctx->event_list);
 	ctx->task = task;
 }
 
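
Taken together, the hunks follow the usual RCU-protected list pattern: writers still serialize insertions and removals (in the callers of list_add_counter()/list_del_counter()), readers traverse the list under rcu_read_lock() only, and the final kfree() is deferred through call_rcu() so a concurrent reader can never touch freed memory. A self-contained kernel-style sketch of that pattern, using a hypothetical my_node/my_list pair rather than the perf_counter structures:

	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>
	#include <linux/rculist.h>
	#include <linux/rcupdate.h>

	struct my_node {
		int			value;
		struct list_head	entry;		/* linked into my_list */
		struct rcu_head		rcu_head;	/* for deferred freeing */
	};

	static LIST_HEAD(my_list);
	static DEFINE_SPINLOCK(my_lock);		/* serializes writers only */

	static void my_node_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct my_node, rcu_head));
	}

	/* Writer side: publish a new node; readers may see it immediately. */
	static void my_add(struct my_node *node)
	{
		spin_lock(&my_lock);
		list_add_rcu(&node->entry, &my_list);
		spin_unlock(&my_lock);
	}

	/* Writer side: unlink, then free only after current readers finish. */
	static void my_del(struct my_node *node)
	{
		spin_lock(&my_lock);
		list_del_rcu(&node->entry);
		spin_unlock(&my_lock);
		call_rcu(&node->rcu_head, my_node_free_rcu);
	}

	/* Reader side: lockless traversal, as perf_swcounter_ctx_event() now does. */
	static int my_find(int value)
	{
		struct my_node *node;
		int found = 0;

		rcu_read_lock();
		list_for_each_entry_rcu(node, &my_list, entry) {
			if (node->value == value) {
				found = 1;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}
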