author     Linus Torvalds <torvalds@linux-foundation.org>  2014-10-13 10:06:09 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-13 10:06:09 -0400
commit     ebf546cc5391b9a8a17c1196b05b4357ef0138a2 (patch)
tree       8ee8a9c1ef57710a07290228883db7ca742bf835
parent     9d9420f1209a1facea7110d549ac695f5aeeb503 (diff)
parent     9c2b9d30e28559a78c9e431cdd7f2c6bf5a9ee67 (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
"Two leftover fixes from the v3.17 cycle - these will be forwarded to
stable as well, if they prove problem-free in wider testing as well"
[ Side note: the "fix perf bug in fork()" fix had also come in through
Andrew's patch-bomb - Linus ]
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf: Fix perf bug in fork()
perf: Fix unclone_ctx() vs. locking
 kernel/events/core.c | 54 +++++++++++++++++++++++++++++++-----------------------
 1 file changed, 31 insertions(+), 23 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 385f11d94105..094df8c0742d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -906,13 +906,23 @@ static void put_ctx(struct perf_event_context *ctx)
 	}
 }
 
-static void unclone_ctx(struct perf_event_context *ctx)
+/*
+ * This must be done under the ctx->lock, such as to serialize against
+ * context_equiv(), therefore we cannot call put_ctx() since that might end up
+ * calling scheduler related locks and ctx->lock nests inside those.
+ */
+static __must_check struct perf_event_context *
+unclone_ctx(struct perf_event_context *ctx)
 {
-	if (ctx->parent_ctx) {
-		put_ctx(ctx->parent_ctx);
+	struct perf_event_context *parent_ctx = ctx->parent_ctx;
+
+	lockdep_assert_held(&ctx->lock);
+
+	if (parent_ctx)
 		ctx->parent_ctx = NULL;
-	}
 	ctx->generation++;
+
+	return parent_ctx;
 }
 
 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
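
The new comment on unclone_ctx() captures the core of the locking fix:
put_ctx() can end up taking scheduler-related locks, and ctx->lock nests
inside those, so dropping the parent reference while still holding ctx->lock
risks lock inversion. The patch therefore only detaches the parent under the
lock and hands the reference back for the caller to put after unlocking. A
minimal userspace sketch of that pattern (hypothetical names, toy refcount,
not the kernel API):

  /* Detach under the lock; release the reference only after unlocking. */
  #include <pthread.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct ctx {
          pthread_mutex_t  lock;
          int              refcount;   /* toy refcount; the kernel uses atomics */
          struct ctx      *parent;
  };

  /* Release path; in the kernel this may take scheduler locks. */
  static void ctx_put(struct ctx *c)
  {
          if (c && --c->refcount == 0)
                  free(c);
  }

  /* Must be called with c->lock held; returns the detached parent. */
  static struct ctx *ctx_unclone(struct ctx *c)
  {
          struct ctx *parent = c->parent;

          c->parent = NULL;            /* detach only; do NOT put under the lock */
          return parent;
  }

  int main(void)
  {
          struct ctx *parent = calloc(1, sizeof(*parent));
          struct ctx *child  = calloc(1, sizeof(*child));
          struct ctx *clone;

          parent->refcount = 1;
          child->parent = parent;
          pthread_mutex_init(&child->lock, NULL);

          pthread_mutex_lock(&child->lock);
          clone = ctx_unclone(child);  /* the caller pattern from the patch */
          pthread_mutex_unlock(&child->lock);

          ctx_put(clone);              /* safe: the lock is already dropped */
          free(child);
          printf("parent reference dropped outside the lock\n");
          return 0;
  }

The __must_check annotation enforces the same discipline at compile time: a
caller that ignores the returned context would silently leak the reference.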
@@ -2259,6 +2269,9 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 static int context_equiv(struct perf_event_context *ctx1,
 			 struct perf_event_context *ctx2)
 {
+	lockdep_assert_held(&ctx1->lock);
+	lockdep_assert_held(&ctx2->lock);
+
 	/* Pinning disables the swap optimization */
 	if (ctx1->pin_count || ctx2->pin_count)
 		return 0;
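
The lockdep_assert_held() calls added to unclone_ctx() and context_equiv()
turn the new locking contract into an invariant that lockdep can check at
runtime. A rough userspace analogue (hypothetical assert_held_by_me()
helper built on an error-checking pthread mutex, which POSIX specifies to
fail with EDEADLK when the owner relocks it):

  #include <assert.h>
  #include <errno.h>
  #include <pthread.h>
  #include <stdio.h>

  /* Assert that the calling thread currently owns the lock. */
  static void assert_held_by_me(pthread_mutex_t *lock)
  {
          int ret = pthread_mutex_lock(lock);

          if (ret == 0)
                  pthread_mutex_unlock(lock);  /* acquired it: caller did NOT hold it */
          assert(ret == EDEADLK);              /* owner relock reports EDEADLK */
  }

  int main(void)
  {
          pthread_mutexattr_t attr;
          pthread_mutex_t lock;

          pthread_mutexattr_init(&attr);
          pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
          pthread_mutex_init(&lock, &attr);

          pthread_mutex_lock(&lock);
          assert_held_by_me(&lock);    /* passes: contract satisfied */
          pthread_mutex_unlock(&lock);

          printf("locking contract verified\n");
          return 0;
  }

An error-checking mutex makes "do I hold this lock?" observable instead of
deadlocking; lockdep gives the kernel the same visibility without the probe.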
@@ -2992,6 +3005,7 @@ static int event_enable_on_exec(struct perf_event *event,
  */
 static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 {
+	struct perf_event_context *clone_ctx = NULL;
 	struct perf_event *event;
 	unsigned long flags;
 	int enabled = 0;
@@ -3023,7 +3037,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	 * Unclone this context if we enabled any event.
 	 */
 	if (enabled)
-		unclone_ctx(ctx);
+		clone_ctx = unclone_ctx(ctx);
 
 	raw_spin_unlock(&ctx->lock);
 
@@ -3033,6 +3047,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	perf_event_context_sched_in(ctx, ctx->task);
 out:
 	local_irq_restore(flags);
+
+	if (clone_ctx)
+		put_ctx(clone_ctx);
 }
 
 void perf_event_exec(void)
@@ -3185,7 +3202,7 @@ errout:
 static struct perf_event_context *
 find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
 {
-	struct perf_event_context *ctx;
+	struct perf_event_context *ctx, *clone_ctx = NULL;
 	struct perf_cpu_context *cpuctx;
 	unsigned long flags;
 	int ctxn, err;
@@ -3219,9 +3236,12 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
 retry:
 	ctx = perf_lock_task_context(task, ctxn, &flags);
 	if (ctx) {
-		unclone_ctx(ctx);
+		clone_ctx = unclone_ctx(ctx);
 		++ctx->pin_count;
 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
+
+		if (clone_ctx)
+			put_ctx(clone_ctx);
 	} else {
 		ctx = alloc_perf_context(pmu, task);
 		err = -ENOMEM;
@@ -7646,7 +7666,7 @@ __perf_event_exit_task(struct perf_event *child_event,
 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 {
 	struct perf_event *child_event, *next;
-	struct perf_event_context *child_ctx, *parent_ctx;
+	struct perf_event_context *child_ctx, *clone_ctx = NULL;
 	unsigned long flags;
 
 	if (likely(!child->perf_event_ctxp[ctxn])) {
@@ -7673,28 +7693,16 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	child->perf_event_ctxp[ctxn] = NULL;
 
 	/*
-	 * In order to avoid freeing: child_ctx->parent_ctx->task
-	 * under perf_event_context::lock, grab another reference.
-	 */
-	parent_ctx = child_ctx->parent_ctx;
-	if (parent_ctx)
-		get_ctx(parent_ctx);
-
-	/*
 	 * If this context is a clone; unclone it so it can't get
 	 * swapped to another process while we're removing all
 	 * the events from it.
 	 */
-	unclone_ctx(child_ctx);
+	clone_ctx = unclone_ctx(child_ctx);
 	update_context_time(child_ctx);
 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
 
-	/*
-	 * Now that we no longer hold perf_event_context::lock, drop
-	 * our extra child_ctx->parent_ctx reference.
-	 */
-	if (parent_ctx)
-		put_ctx(parent_ctx);
+	if (clone_ctx)
+		put_ctx(clone_ctx);
 
 	/*
 	 * Report the task dead after unscheduling the events so that we