Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	69
1 file changed, 57 insertions(+), 12 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index cb6c0d2af68f..671f6c8c8a32 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -31,6 +31,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/irq_regs.h>
 
@@ -2234,11 +2235,6 @@ int perf_event_release_kernel(struct perf_event *event)
 	raw_spin_unlock_irq(&ctx->lock);
 	mutex_unlock(&ctx->mutex);
 
-	mutex_lock(&event->owner->perf_event_mutex);
-	list_del_init(&event->owner_entry);
-	mutex_unlock(&event->owner->perf_event_mutex);
-	put_task_struct(event->owner);
-
 	free_event(event);
 
 	return 0;
@@ -2251,9 +2247,43 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 static int perf_release(struct inode *inode, struct file *file)
 {
 	struct perf_event *event = file->private_data;
+	struct task_struct *owner;
 
 	file->private_data = NULL;
 
+	rcu_read_lock();
+	owner = ACCESS_ONCE(event->owner);
+	/*
+	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
+	 * !owner it means the list deletion is complete and we can indeed
+	 * free this event, otherwise we need to serialize on
+	 * owner->perf_event_mutex.
+	 */
+	smp_read_barrier_depends();
+	if (owner) {
+		/*
+		 * Since delayed_put_task_struct() also drops the last
+		 * task reference we can safely take a new reference
+		 * while holding the rcu_read_lock().
+		 */
+		get_task_struct(owner);
+	}
+	rcu_read_unlock();
+
+	if (owner) {
+		mutex_lock(&owner->perf_event_mutex);
+		/*
+		 * We have to re-check the event->owner field, if it is cleared
+		 * we raced with perf_event_exit_task(), acquiring the mutex
+		 * ensured they're done, and we can proceed with freeing the
+		 * event.
+		 */
+		if (event->owner)
+			list_del_init(&event->owner_entry);
+		mutex_unlock(&owner->perf_event_mutex);
+		put_task_struct(owner);
+	}
+
 	return perf_event_release_kernel(event);
 }
 
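The snapshot at the top of the new perf_release() is a two-step dance: read event->owner once (ACCESS_ONCE() plus smp_read_barrier_depends(), which orders the dependent loads through the pointer and compiles to nothing on everything but Alpha), then pin the task with get_task_struct() while still inside the RCU read-side critical section. Below is a rough userspace model of just that pinning step, an assumption for illustration rather than kernel code: a C11 acquire load is a (stronger) substitute for the snapshot plus dependency barrier, and a plain refcount substitutes for RCU plus get_task_struct().

#include <stdatomic.h>
#include <stdio.h>

struct task { atomic_int usage; };	/* stand-in for task_struct */

/*
 * Snapshot the published owner and pin it.  The acquire load models
 * ACCESS_ONCE() followed by smp_read_barrier_depends(); bumping the
 * refcount models get_task_struct() under rcu_read_lock().
 */
static struct task *pin_owner(struct task *_Atomic *ownerp)
{
	struct task *owner = atomic_load_explicit(ownerp, memory_order_acquire);

	if (owner)
		atomic_fetch_add_explicit(&owner->usage, 1, memory_order_relaxed);
	return owner;
}

int main(void)
{
	struct task t;
	struct task *_Atomic owner;

	atomic_init(&t.usage, 1);
	atomic_init(&owner, &t);

	struct task *o = pin_owner(&owner);
	printf("pinned %p, usage now %d\n", (void *)o, atomic_load(&t.usage));
	return 0;
}

Note the shape: the lock-free snapshot only decides whether there is an owner to serialize with; the actual list surgery in the hunk above still happens under owner->perf_event_mutex, after a re-check of event->owner.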
@@ -5677,7 +5707,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	mutex_unlock(&ctx->mutex);
 
 	event->owner = current;
-	get_task_struct(current);
+
 	mutex_lock(&current->perf_event_mutex);
 	list_add_tail(&event->owner_entry, &current->perf_event_list);
 	mutex_unlock(&current->perf_event_mutex);
@@ -5745,12 +5775,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 	++ctx->generation;
 	mutex_unlock(&ctx->mutex);
 
-	event->owner = current;
-	get_task_struct(current);
-	mutex_lock(&current->perf_event_mutex);
-	list_add_tail(&event->owner_entry, &current->perf_event_list);
-	mutex_unlock(&current->perf_event_mutex);
-
 	return event;
 
 err_free:
@@ -5901,8 +5925,24 @@ again:
  */
 void perf_event_exit_task(struct task_struct *child)
 {
+	struct perf_event *event, *tmp;
 	int ctxn;
 
+	mutex_lock(&child->perf_event_mutex);
+	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
+				 owner_entry) {
+		list_del_init(&event->owner_entry);
+
+		/*
+		 * Ensure the list deletion is visible before we clear
+		 * the owner, closes a race against perf_release() where
+		 * we need to serialize on the owner->perf_event_mutex.
+		 */
+		smp_wmb();
+		event->owner = NULL;
+	}
+	mutex_unlock(&child->perf_event_mutex);
+
 	for_each_task_context_nr(ctxn)
 		perf_event_exit_task_context(child, ctxn);
 }
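Together, the perf_release() and perf_event_exit_task() hunks form the two halves of a handoff: the exiting task unlinks the event and only then clears event->owner (the smp_wmb() orders those two stores), while the releasing task snapshots the owner and re-checks event->owner under owner->perf_event_mutex before touching the list. Here is a self-contained userspace sketch of that handoff, under the assumption that a C11 release store can model smp_wmb() plus the plain store; RCU and task refcounting are elided, and the struct names mirror the kernel's but everything is a stand-in.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct task {				/* stand-in for task_struct */
	pthread_mutex_t perf_event_mutex;
	int on_list;			/* stand-in for the owner list */
};

struct event {				/* stand-in for perf_event */
	struct task *_Atomic owner;
};

/* exit side: unlink under the mutex, then publish owner == NULL */
static void *exit_side(void *arg)
{
	struct event *e = arg;
	struct task *t = atomic_load_explicit(&e->owner, memory_order_relaxed);

	pthread_mutex_lock(&t->perf_event_mutex);
	t->on_list = 0;				/* list_del_init() */
	/*
	 * Release store: the unlink above is visible before owner reads
	 * as NULL, modelling smp_wmb() + the plain store in the patch.
	 */
	atomic_store_explicit(&e->owner, NULL, memory_order_release);
	pthread_mutex_unlock(&t->perf_event_mutex);
	return NULL;
}

/* release side: snapshot the owner, then re-check it under the mutex */
static void *release_side(void *arg)
{
	struct event *e = arg;
	/* acquire load pairs with the release store in exit_side() */
	struct task *owner =
		atomic_load_explicit(&e->owner, memory_order_acquire);

	if (owner) {
		pthread_mutex_lock(&owner->perf_event_mutex);
		/* re-check: we may have raced with exit_side() */
		if (atomic_load_explicit(&e->owner, memory_order_relaxed))
			owner->on_list = 0;	/* list_del_init() */
		pthread_mutex_unlock(&owner->perf_event_mutex);
	}
	return NULL;
}

int main(void)
{
	struct task t = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct event e;
	pthread_t a, b;

	atomic_init(&e.owner, &t);
	pthread_create(&a, NULL, exit_side, &e);
	pthread_create(&b, NULL, release_side, &e);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* always 0: whichever side wins, the event ends up unlinked */
	printf("on_list = %d\n", t.on_list);
	return 0;
}

Build with cc -pthread. Whichever thread takes the mutex first does the unlink; the re-check keeps the release side from touching the list after the exit side has already disowned the event.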
@@ -6321,6 +6361,8 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 
 void __init perf_event_init(void)
 {
+	int ret;
+
 	perf_event_init_all_cpus();
 	init_srcu_struct(&pmus_srcu);
 	perf_pmu_register(&perf_swevent);
@@ -6328,4 +6370,7 @@ void __init perf_event_init(void)
 	perf_pmu_register(&perf_task_clock);
 	perf_tp_register();
 	perf_cpu_notifier(perf_cpu_notify);
+
+	ret = init_hw_breakpoint();
+	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
 }
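Finally, hardware-breakpoint setup moves into perf_event_init(), with failure downgraded to a WARN(): the rest of perf keeps working even if init_hw_breakpoint() cannot initialize. A minimal userspace sketch of that report-and-continue idiom follows; warn_on() and the stub are hypothetical stand-ins for illustration, not kernel API.

#include <errno.h>
#include <stdio.h>

/* minimal stand-in for the kernel's WARN(): report loudly, keep going */
static int warn_on(int cond, const char *what, int err)
{
	if (cond)
		fprintf(stderr, "WARNING: %s: %d\n", what, err);
	return cond;
}

/* stub standing in for the real init_hw_breakpoint() */
static int init_hw_breakpoint(void)
{
	return -ENOMEM;		/* pretend the per-CPU allocation failed */
}

int main(void)
{
	int ret = init_hw_breakpoint();

	warn_on(ret, "hw_breakpoint initialization failed with", ret);
	return 0;		/* "boot" continues; breakpoints are just unavailable */
}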