Diffstat (limited to 'kernel/perf_event.c')

 kernel/perf_event.c | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index c75925c4d1e2..8e81a9860a0d 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -145,8 +145,8 @@ static struct srcu_struct pmus_srcu;
  */
 int sysctl_perf_event_paranoid __read_mostly = 1;
 
-/* Minimum for 128 pages + 1 for the user control page */
-int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
+/* Minimum for 512 kiB + 1 user control page */
+int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
 
 /*
  * max perf event sample rate
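
The first hunk fixes the sysctl default: the old value 516 hard-coded
512 kiB of buffer plus one assumed 4 kiB control page, which undercounts
on architectures with larger pages; the new expression scales the control
page with PAGE_SIZE. A minimal userspace sketch of the arithmetic (not
kernel code; the page sizes are chosen for illustration):

    #include <stdio.h>

    int main(void)
    {
        long page_sizes[] = { 4096, 16384, 65536 }; /* 4K, 16K, 64K */

        for (int i = 0; i < 3; i++) {
            long old_kib = 516;                        /* 512 + assumed 4 */
            long new_kib = 512 + page_sizes[i] / 1024; /* 512 + PAGE_SIZE/1024 */
            printf("PAGE_SIZE=%6ld: old=%ld kiB, new=%ld kiB\n",
                   page_sizes[i], old_kib, new_kib);
        }
        return 0;
    }

With 64 kiB pages the control page alone needs 64 kiB, so the old default
of 516 kiB could not cover 512 kiB of buffer plus the control page.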
@@ -364,6 +364,7 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
			}
 
			if (mode & PERF_CGROUP_SWIN) {
+				WARN_ON_ONCE(cpuctx->cgrp);
				/* set cgrp before ctxsw in to
				 * allow event_filter_match() to not
				 * have to pass task around
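
The WARN_ON_ONCE() added here documents an invariant of
perf_cgroup_switch(): a PERF_CGROUP_SWIN may only run after the previous
cgroup was switched out, i.e. cpuctx->cgrp must already be NULL. A toy
userspace model of that invariant (the flag values and struct layout are
assumptions for the sketch, not the kernel's definitions):

    #include <assert.h>
    #include <stddef.h>

    #define PERF_CGROUP_SWOUT 0x1
    #define PERF_CGROUP_SWIN  0x2

    struct cpu_context { void *cgrp; };

    /* Switch-out clears cgrp; switch-in requires it to be clear,
     * mirroring the new WARN_ON_ONCE(cpuctx->cgrp). */
    static void cgroup_switch(struct cpu_context *cpuctx, int mode,
                              void *next_cgrp)
    {
        if (mode & PERF_CGROUP_SWOUT)
            cpuctx->cgrp = NULL;
        if (mode & PERF_CGROUP_SWIN) {
            assert(cpuctx->cgrp == NULL); /* the checked invariant */
            cpuctx->cgrp = next_cgrp;
        }
    }

    int main(void)
    {
        struct cpu_context ctx = { NULL };
        int a, b;

        cgroup_switch(&ctx, PERF_CGROUP_SWIN, &a);
        cgroup_switch(&ctx, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN, &b);
        /* cgroup_switch(&ctx, PERF_CGROUP_SWIN, &a); would trip it */
        return 0;
    }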
@@ -2423,6 +2424,14 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
	if (!ctx || !ctx->nr_events)
		goto out;
 
+	/*
+	 * We must ctxsw out cgroup events to avoid conflict
+	 * when invoking perf_task_event_sched_in() later on
+	 * in this function. Otherwise we end up trying to
+	 * ctxswin cgroup events which are already scheduled
+	 * in.
+	 */
+	perf_cgroup_sched_out(current);
	task_ctx_sched_out(ctx, EVENT_ALL);
 
	raw_spin_lock(&ctx->lock);
@@ -2447,6 +2456,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 
	raw_spin_unlock(&ctx->lock);
 
+	/*
+	 * Also calls ctxswin for cgroup events, if any:
+	 */
	perf_event_context_sched_in(ctx, ctx->task);
 out:
	local_irq_restore(flags);
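
The two hunks above belong together: perf_event_enable_on_exec() can run
while cgroup events are already scheduled in, and the
perf_event_context_sched_in() at the end schedules cgroup events in
again, so the added perf_cgroup_sched_out(current) keeps the out/in
transitions balanced. A toy model of that pairing (the *_model names are
hypothetical stand-ins, not kernel API):

    #include <assert.h>
    #include <stdbool.h>

    static bool cgroup_events_in;

    static void perf_cgroup_sched_out_model(void)
    {
        cgroup_events_in = false;
    }

    /* Schedules task *and* cgroup events back in; scheduling in
     * events that are already in is the conflict being avoided. */
    static void perf_event_context_sched_in_model(void)
    {
        assert(!cgroup_events_in);
        cgroup_events_in = true;
    }

    static void perf_event_enable_on_exec_model(void)
    {
        perf_cgroup_sched_out_model(); /* the added call */
        /* ... enable events under ctx->lock ... */
        perf_event_context_sched_in_model();
    }

    int main(void)
    {
        cgroup_events_in = true; /* live cgroup events at exec time */
        perf_event_enable_on_exec_model();
        return 0;
    }

Without the first call, the model (like the unpatched kernel) would try
to schedule in cgroup events that never went out.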
@@ -6531,6 +6543,11 @@ SYSCALL_DEFINE5(perf_event_open,
		goto err_alloc;
	}
 
+	if (task) {
+		put_task_struct(task);
+		task = NULL;
+	}
+
	/*
	 * Look up the group leader (we will attach this event to it):
	 */
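
The last hunk drops the task_struct reference taken earlier in
perf_event_open() as soon as it is no longer needed, and NULLs the local
pointer so the remaining success and error paths neither leak the
reference nor put it twice. A hedged sketch of the get/put discipline
(the *_model names stand in for the real task refcount helpers):

    #include <stdlib.h>

    struct task_model { int refcount; };

    static void put_task_model(struct task_model *t)
    {
        if (--t->refcount == 0)
            free(t);
    }

    /* Mirrors the hunk: once the context has been pinned, release
     * the task reference immediately and clear the pointer so later
     * cleanup code has nothing left to drop. */
    static void open_event_model(struct task_model *task)
    {
        /* ... context lookup using task succeeded above ... */
        if (task) {
            put_task_model(task);
            task = NULL;
        }
        /* ... error paths below must not touch task again ... */
    }

    int main(void)
    {
        struct task_model *t = malloc(sizeof(*t));
        t->refcount = 1;
        open_event_model(t); /* frees t exactly once */
        return 0;
    }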
