Diffstat (limited to 'kernel/perf_event.c')
 kernel/perf_event.c | 32 ++++++++++++++++++++++++++++++--
 1 file changed, 30 insertions(+), 2 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 3472bb1a070c..8e81a9860a0d 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -145,7 +145,8 @@ static struct srcu_struct pmus_srcu;
  */
 int sysctl_perf_event_paranoid __read_mostly = 1;
 
-int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
+/* Minimum for 512 kiB + 1 user control page */
+int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
 
 /*
  * max perf event sample rate
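The bumped default is simple arithmetic: the mmap path charges the one control (metadata) page against the same per-user allowance as the data buffer, so the minimum grows by one page's worth of kiB. A standalone sketch of the sum, assuming 4 kiB pages (PAGE_SIZE below is a local stand-in for the kernel macro):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumption: 4 kiB pages */

int main(void)
{
	unsigned long mlock_kb = 512 + (PAGE_SIZE / 1024);

	/* 516 kiB total: a full 512 kiB buffer plus the control page */
	printf("allowance: %lu kiB = 512 kiB buffer + %lu kiB control page\n",
	       mlock_kb, PAGE_SIZE / 1024);
	return 0;
}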
@@ -363,6 +364,7 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 		}
 
 		if (mode & PERF_CGROUP_SWIN) {
+			WARN_ON_ONCE(cpuctx->cgrp);
 			/* set cgrp before ctxsw in to
 			 * allow event_filter_match() to not
 			 * have to pass task around
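The WARN_ON_ONCE() asserts an invariant rather than changing behaviour: switch-in must find cpuctx->cgrp already NULL, either because the previous switch-out cleared it or because list_del_event() (fixed below) did. A minimal userspace sketch of that invariant, with assert() standing in for WARN_ON_ONCE() and all names hypothetical:

#include <assert.h>
#include <stddef.h>

struct cpuctx { void *cgrp; };	/* per-CPU slot for the current cgroup */

static void cgroup_switch_in(struct cpuctx *c, void *new_cgrp)
{
	assert(c->cgrp == NULL);	/* a stale pointer would trip here */
	c->cgrp = new_cgrp;		/* set before scheduling events in */
}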
@@ -941,6 +943,7 @@ static void perf_group_attach(struct perf_event *event)
 static void
 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 {
+	struct perf_cpu_context *cpuctx;
 	/*
 	 * We can have double detach due to exit/hot-unplug + close.
 	 */
@@ -949,8 +952,17 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 
 	event->attach_state &= ~PERF_ATTACH_CONTEXT;
 
-	if (is_cgroup_event(event))
+	if (is_cgroup_event(event)) {
 		ctx->nr_cgroups--;
+		cpuctx = __get_cpu_context(ctx);
+		/*
+		 * if there are no more cgroup events
+		 * then clear cgrp to avoid stale pointer
+		 * in update_cgrp_time_from_cpuctx()
+		 */
+		if (!ctx->nr_cgroups)
+			cpuctx->cgrp = NULL;
+	}
 
 	ctx->nr_events--;
 	if (event->attr.inherit_stat)
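This hunk is the heart of the fix: cpuctx->cgrp was set on cgroup switch-in but never cleared when the last cgroup event left the context, leaving update_cgrp_time_from_cpuctx() to chase a dangling pointer. The pattern, reduced to a self-contained sketch (illustrative names, not kernel API):

#include <stddef.h>

struct cpu_cache {
	int nr_cgroups;	/* live cgroup events on this context */
	void *cgrp;	/* borrowed pointer, valid only while nr_cgroups > 0 */
};

static void cache_del_cgroup_event(struct cpu_cache *c)
{
	c->nr_cgroups--;
	/* last user gone: clear the cache so later readers see NULL
	 * rather than a pointer into freed memory */
	if (!c->nr_cgroups)
		c->cgrp = NULL;
}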
@@ -2412,6 +2424,14 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	if (!ctx || !ctx->nr_events)
 		goto out;
 
+	/*
+	 * We must ctxsw out cgroup events to avoid conflict
+	 * when invoking perf_task_event_sched_in() later on
+	 * in this function. Otherwise we end up trying to
+	 * ctxswin cgroup events which are already scheduled
+	 * in.
+	 */
+	perf_cgroup_sched_out(current);
 	task_ctx_sched_out(ctx, EVENT_ALL);
 
 	raw_spin_lock(&ctx->lock);
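The reason the extra sched-out is needed: cgroup events live in the per-CPU context, not the task context, so task_ctx_sched_out() alone leaves them scheduled in, and the sched-in at the end of the function (annotated in the next hunk) would then schedule them a second time. A stub model of the ordering; the names mirror the patch but the bodies are empty stand-ins:

/* stub model: each call is a no-op, only the order matters */
static void perf_cgroup_sched_out(void)       { /* cgroup events out */ }
static void task_ctx_sched_out(void)          { /* task events out */ }
static void enable_events_under_lock(void)    { /* flip to ENABLED */ }
static void perf_event_context_sched_in(void) { /* both kinds back in */ }

static void enable_on_exec_order(void)
{
	perf_cgroup_sched_out();	/* must come first: the task sched-out
					 * below does not touch cgroup events */
	task_ctx_sched_out();
	enable_events_under_lock();
	perf_event_context_sched_in();	/* schedules cgroup events too; without
					 * the first call they would be
					 * scheduled in twice */
}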
@@ -2436,6 +2456,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 
 	raw_spin_unlock(&ctx->lock);
 
+	/*
+	 * Also calls ctxswin for cgroup events, if any:
+	 */
 	perf_event_context_sched_in(ctx, ctx->task);
 out:
 	local_irq_restore(flags);
@@ -6520,6 +6543,11 @@ SYSCALL_DEFINE5(perf_event_open,
 		goto err_alloc;
 	}
 
+	if (task) {
+		put_task_struct(task);
+		task = NULL;
+	}
+
 	/*
 	 * Look up the group leader (we will attach this event to it):
 	 */
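The perf_event_open() hunk drops the task reference as soon as the context is acquired and NULLs the local pointer, so the later error paths (which put the task when it is non-NULL) can neither leak the reference nor drop it twice. The put-early-and-NULL pattern in a self-contained sketch with hypothetical obj_get()/obj_put() helpers:

#include <stdlib.h>

struct obj { int refcount; };

static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }
static void obj_put(struct obj *o) { if (--o->refcount == 0) free(o); }

static int do_lookup(struct obj *o) { (void)o; return 0; }	/* stub */
static int do_more_work(void)       { return 0; }		/* stub */

static int attach(struct obj *target)
{
	struct obj *o = obj_get(target);	/* pinned across the lookup */
	int err = do_lookup(o);

	if (err)
		goto err_out;	/* error path still owns the reference */

	obj_put(o);	/* lookup done: drop the reference early ... */
	o = NULL;	/* ... so err_out cannot put it a second time */

	err = do_more_work();
	if (err)
		goto err_out;
	return 0;

err_out:
	if (o)
		obj_put(o);
	return err;
}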