author		Ingo Molnar <mingo@elte.hu>	2010-11-26 09:07:02 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-11-26 09:07:02 -0500
commit		6c869e772c72d509d0db243a56c205ef48a29baf (patch)
tree		9a290f1742526a8816f94560cb09bc0a09c910de /kernel/perf_event.c
parent		e4e91ac410356da3a518188f371e9d3b52ee38ee (diff)
parent		ee6dcfa40a50fe12a3ae0fb4d2653c66c3ed6556 (diff)
Merge branch 'perf/urgent' into perf/core
Conflicts:
	arch/x86/kernel/apic/hw_nmi.c

Merge reason: Resolve conflict, queue up dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	93
1 file changed, 77 insertions(+), 16 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 40c3aab648a1..43f757ccf831 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -31,6 +31,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/irq_regs.h>
 
@@ -1286,8 +1287,6 @@ void __perf_event_task_sched_out(struct task_struct *task,
 {
 	int ctxn;
 
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
-
 	for_each_task_context_nr(ctxn)
 		perf_event_context_sched_out(task, ctxn, next);
 }
@@ -1621,8 +1620,12 @@ static void rotate_ctx(struct perf_event_context *ctx)
 {
 	raw_spin_lock(&ctx->lock);
 
-	/* Rotate the first entry last of non-pinned groups */
-	list_rotate_left(&ctx->flexible_groups);
+	/*
+	 * Rotate the first entry last of non-pinned groups. Rotation might be
+	 * disabled by the inheritance code.
+	 */
+	if (!ctx->rotate_disable)
+		list_rotate_left(&ctx->flexible_groups);
 
 	raw_spin_unlock(&ctx->lock);
 }
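
For readers who don't have the list helpers memorized: list_rotate_left() moves the first entry of a list to its tail, which is what gives flexible (non-pinned) groups their round-robin turn at the PMU. A minimal userspace sketch of that operation, using a hand-rolled circular doubly-linked list in place of the kernel's list_head API (the _entry-suffixed helpers are illustrative, not kernel functions):

#include <stdio.h>

/* Minimal circular doubly-linked list, analogous to the kernel's list_head. */
struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_del_entry(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail_entry(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

/* Analogue of list_rotate_left(): the first entry becomes the last. */
static void list_rotate_left_entry(struct list_head *h)
{
	if (h->next != h) {		/* list is non-empty */
		struct list_head *first = h->next;
		list_del_entry(first);
		list_add_tail_entry(first, h);
	}
}

struct group { struct list_head link; const char *name; };

int main(void)
{
	struct list_head head;
	struct group a = { .name = "A" }, b = { .name = "B" }, c = { .name = "C" };
	struct list_head *pos;

	list_init(&head);
	list_add_tail_entry(&a.link, &head);
	list_add_tail_entry(&b.link, &head);
	list_add_tail_entry(&c.link, &head);

	list_rotate_left_entry(&head);	/* A B C -> B C A */
	for (pos = head.next; pos != &head; pos = pos->next)
		printf("%s ", ((struct group *)pos)->name);	/* link is the first member */
	printf("\n");
	return 0;
}

Compiled with gcc, this prints "B C A": the former head of the flexible list has gone to the back of the queue after one rotation.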
@@ -2234,11 +2237,6 @@ int perf_event_release_kernel(struct perf_event *event)
 	raw_spin_unlock_irq(&ctx->lock);
 	mutex_unlock(&ctx->mutex);
 
-	mutex_lock(&event->owner->perf_event_mutex);
-	list_del_init(&event->owner_entry);
-	mutex_unlock(&event->owner->perf_event_mutex);
-	put_task_struct(event->owner);
-
 	free_event(event);
 
 	return 0;
@@ -2251,9 +2249,43 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 static int perf_release(struct inode *inode, struct file *file)
 {
 	struct perf_event *event = file->private_data;
+	struct task_struct *owner;
 
 	file->private_data = NULL;
 
+	rcu_read_lock();
+	owner = ACCESS_ONCE(event->owner);
+	/*
+	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
+	 * !owner it means the list deletion is complete and we can indeed
+	 * free this event, otherwise we need to serialize on
+	 * owner->perf_event_mutex.
+	 */
+	smp_read_barrier_depends();
+	if (owner) {
+		/*
+		 * Since delayed_put_task_struct() also drops the last
+		 * task reference we can safely take a new reference
+		 * while holding the rcu_read_lock().
+		 */
+		get_task_struct(owner);
+	}
+	rcu_read_unlock();
+
+	if (owner) {
+		mutex_lock(&owner->perf_event_mutex);
+		/*
+		 * We have to re-check the event->owner field, if it is cleared
+		 * we raced with perf_event_exit_task(), acquiring the mutex
+		 * ensured they're done, and we can proceed with freeing the
+		 * event.
+		 */
+		if (event->owner)
+			list_del_init(&event->owner_entry);
+		mutex_unlock(&owner->perf_event_mutex);
+		put_task_struct(owner);
+	}
+
 	return perf_event_release_kernel(event);
 }
 
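The shape of this fix deserves a note: perf_release() snapshots a pointer that the owner may clear concurrently, pins the owner while RCU guarantees the task_struct is still live, then re-validates under the owner's mutex before touching the shared list. Below is a compressed userspace analogue of the reader side, assuming C11 atomics and a pthread mutex. Every type and helper in it is an illustrative stand-in; the acquire load is a stronger substitute for the ACCESS_ONCE() + smp_read_barrier_depends() pair, and userspace has no RCU here, so the refcount alone carries the pinning:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct task;				/* stand-in for task_struct */

struct event {
	struct task *_Atomic owner;	/* cleared by the exiting owner */
	int on_owner_list;		/* stand-in for the owner_entry linkage */
};

struct task {
	pthread_mutex_t perf_event_mutex;
	_Atomic int refcount;
};

/* Reader side, analogous to perf_release() above. */
static void release_event(struct event *event)
{
	/* acquire load stands in for ACCESS_ONCE() + smp_read_barrier_depends() */
	struct task *owner = atomic_load_explicit(&event->owner, memory_order_acquire);

	if (owner) {
		/* in the kernel this is only safe under rcu_read_lock() */
		atomic_fetch_add(&owner->refcount, 1);

		pthread_mutex_lock(&owner->perf_event_mutex);
		/*
		 * Re-check under the mutex: if the owner cleared event->owner
		 * meanwhile, it already unlinked the event and we must not
		 * touch the owner list again.
		 */
		if (atomic_load(&event->owner))
			event->on_owner_list = 0;	/* ~list_del_init() */
		pthread_mutex_unlock(&owner->perf_event_mutex);

		atomic_fetch_sub(&owner->refcount, 1);	/* ~put_task_struct() */
	}
	/* ...free the event... */
}

The re-check under the mutex is the crux: whichever side takes owner->perf_event_mutex second observes the other's work and backs off the list operation.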
@@ -5668,7 +5700,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	mutex_unlock(&ctx->mutex);
 
 	event->owner = current;
-	get_task_struct(current);
+
 	mutex_lock(&current->perf_event_mutex);
 	list_add_tail(&event->owner_entry, &current->perf_event_list);
 	mutex_unlock(&current->perf_event_mutex);
@@ -5736,12 +5768,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 	++ctx->generation;
 	mutex_unlock(&ctx->mutex);
 
-	event->owner = current;
-	get_task_struct(current);
-	mutex_lock(&current->perf_event_mutex);
-	list_add_tail(&event->owner_entry, &current->perf_event_list);
-	mutex_unlock(&current->perf_event_mutex);
-
 	return event;
 
 err_free:
@@ -5892,8 +5918,24 @@ again:
  */
 void perf_event_exit_task(struct task_struct *child)
 {
+	struct perf_event *event, *tmp;
 	int ctxn;
 
+	mutex_lock(&child->perf_event_mutex);
+	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
+				 owner_entry) {
+		list_del_init(&event->owner_entry);
+
+		/*
+		 * Ensure the list deletion is visible before we clear
+		 * the owner, closes a race against perf_release() where
+		 * we need to serialize on the owner->perf_event_mutex.
+		 */
+		smp_wmb();
+		event->owner = NULL;
+	}
+	mutex_unlock(&child->perf_event_mutex);
+
 	for_each_task_context_nr(ctxn)
 		perf_event_exit_task_context(child, ctxn);
 }
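
This is the writer side of the protocol that perf_release() relies on: unlink the entry first, then publish the cleared owner, with smp_wmb() ordering the two stores. Continuing the userspace analogue from the perf_release() hunk (same illustrative types; a release store models the smp_wmb() followed by the plain store):

/* Writer side, analogous to perf_event_exit_task() above. */
static void exit_task_events(struct task *task, struct event **events, size_t n)
{
	pthread_mutex_lock(&task->perf_event_mutex);
	for (size_t i = 0; i < n; i++) {
		events[i]->on_owner_list = 0;		/* ~list_del_init() */
		/*
		 * Release store: the list deletion above is visible before
		 * the owner is cleared, pairing with the acquire load in
		 * release_event() (smp_wmb() + plain store in the patch).
		 */
		atomic_store_explicit(&events[i]->owner, NULL,
				      memory_order_release);
	}
	pthread_mutex_unlock(&task->perf_event_mutex);
}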
@@ -6113,6 +6155,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 	struct perf_event *event;
 	struct task_struct *parent = current;
 	int inherited_all = 1;
+	unsigned long flags;
 	int ret = 0;
 
 	child->perf_event_ctxp[ctxn] = NULL;
@@ -6153,6 +6196,15 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 			break;
 	}
 
+	/*
+	 * We can't hold ctx->lock when iterating the ->flexible_group list due
+	 * to allocations, but we need to prevent rotation because
+	 * rotate_ctx() will change the list from interrupt context.
+	 */
+	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+	parent_ctx->rotate_disable = 1;
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
 	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
@@ -6160,6 +6212,10 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 			break;
 	}
 
+	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+	parent_ctx->rotate_disable = 0;
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
 	child_ctx = child->perf_event_ctxp[ctxn];
 
 	if (child_ctx && inherited_all) {
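
Together with the rotate_ctx() hunk above, these two locked writes form a simple bracket: the inheritance path cannot hold ctx->lock across the walk because inherit_task_group() allocates, so rather than excluding the rotator it parks it. A minimal sketch of the pattern, with a pthread mutex standing in for the irq-safe spinlock and comments marking where the kernel's list operations would go:

#include <pthread.h>

struct ctx {
	pthread_mutex_t lock;
	int rotate_disable;
	/* ...flexible_groups list head... */
};

/* Runs from interrupt context in the kernel; think "timer thread" here. */
static void rotate_ctx_sketch(struct ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	if (!ctx->rotate_disable) {
		/* list_rotate_left(&ctx->flexible_groups); */
	}
	pthread_mutex_unlock(&ctx->lock);
}

/* Analogue of the inheritance path: walk the list without holding the lock. */
static void inherit_sketch(struct ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->rotate_disable = 1;
	pthread_mutex_unlock(&ctx->lock);

	/*
	 * Iterate flexible_groups here; this may allocate and sleep, but
	 * the list head can no longer rotate out from under the iteration.
	 */

	pthread_mutex_lock(&ctx->lock);
	ctx->rotate_disable = 0;
	pthread_mutex_unlock(&ctx->lock);
}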
@@ -6312,6 +6368,8 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 
 void __init perf_event_init(void)
 {
+	int ret;
+
 	perf_event_init_all_cpus();
 	init_srcu_struct(&pmus_srcu);
 	perf_pmu_register(&perf_swevent);
@@ -6319,4 +6377,7 @@ void __init perf_event_init(void)
 	perf_pmu_register(&perf_task_clock);
 	perf_tp_register();
 	perf_cpu_notifier(perf_cpu_notify);
+
+	ret = init_hw_breakpoint();
+	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
 }
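
WARN() prints the formatted message with a backtrace when its condition is non-zero and evaluates to that (boolean) condition, so a failed hw_breakpoint init leaves a loud warning in the log but does not stop boot. A crude userspace stand-in, assuming GNU C statement expressions (the kernel macro is considerably more involved and rate-limits per call site):

#include <stdio.h>

/* Illustrative stand-in for the kernel's WARN(): log and return the condition. */
#define WARN(cond, fmt, ...)						\
	({								\
		int __c = !!(cond);					\
		if (__c)						\
			fprintf(stderr, "WARNING: " fmt "\n", ##__VA_ARGS__); \
		__c;							\
	})

static int init_hw_breakpoint_stub(void) { return -12; /* say, -ENOMEM */ }

int main(void)
{
	int ret = init_hw_breakpoint_stub();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
	return 0;	/* execution continues past the warning */
}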