Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--   kernel/perf_event.c | 21
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 1f38270f08c7..2ae7409bf38f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3259,8 +3259,6 @@ static void perf_event_task_output(struct perf_event *event,
 	task_event->event_id.tid = perf_event_tid(event, task);
 	task_event->event_id.ptid = perf_event_tid(event, current);
 
-	task_event->event_id.time = perf_clock();
-
 	perf_output_put(&handle, task_event->event_id);
 
 	perf_output_end(&handle);
@@ -3268,6 +3266,9 @@ static void perf_event_task_output(struct perf_event *event,
 
 static int perf_event_task_match(struct perf_event *event)
 {
+	if (event->state < PERF_EVENT_STATE_INACTIVE)
+		return 0;
+
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
 		return 0;
 
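The same state guard is added to perf_event_comm_match() and perf_event_mmap_match() further down, so side-band records are only delivered to events that are at least inactive (i.e. not disabled or in the error state). For context, a minimal sketch of the resulting task matcher; the trailing attribute test is assumed from the surrounding code and is not part of this hunk:

	static int perf_event_task_match(struct perf_event *event)
	{
		/* skip events that are disabled (OFF) or in the ERROR state */
		if (event->state < PERF_EVENT_STATE_INACTIVE)
			return 0;

		/* per-CPU events only match on their own CPU */
		if (event->cpu != -1 && event->cpu != smp_processor_id())
			return 0;

		/* assumed tail: deliver only to events that asked for task data */
		if (event->attr.comm || event->attr.mmap || event->attr.task)
			return 1;

		return 0;
	}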
@@ -3297,7 +3298,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_task_ctx(&cpuctx->ctx, task_event);
 	if (!ctx)
-		ctx = rcu_dereference(task_event->task->perf_event_ctxp);
+		ctx = rcu_dereference(current->perf_event_ctxp);
 	if (ctx)
 		perf_event_task_ctx(ctx, task_event);
 	put_cpu_var(perf_cpu_context);
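With this hunk, the fallback context lookup uses current rather than the task recorded in the event; the dereference stays under RCU because the context can be freed concurrently. A sketch of the resulting body, assuming the rcu_read_lock()/rcu_read_unlock() pair that brackets this lookup elsewhere in the function (the locking is outside this hunk):

	rcu_read_lock();
	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_event_task_ctx(&cpuctx->ctx, task_event);
	if (!ctx)
		ctx = rcu_dereference(current->perf_event_ctxp);
	if (ctx)
		perf_event_task_ctx(ctx, task_event);
	put_cpu_var(perf_cpu_context);
	rcu_read_unlock();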
@@ -3328,6 +3329,7 @@ static void perf_event_task(struct task_struct *task,
 			/* .ppid */
 			/* .tid  */
 			/* .ptid */
+			.time = perf_clock(),
 		},
 	};
 
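Together with the first hunk, this moves the timestamp out of the per-event output path and into the one-time construction of the record in perf_event_task(), so every listener receiving the same fork/exit record sees the same time value. A sketch of the resulting initializer; only the .time line is part of this hunk, the rest is assumed from the surrounding code:

	struct perf_task_event task_event = {
		/* .task / .task_ctx set up as before (not part of this hunk) */
		.event_id = {
			/* .header set up as before; .pid/.ppid/.tid/.ptid are
			 * still filled in later by perf_event_task_output() */
			.time = perf_clock(),
		},
	};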
@@ -3377,6 +3379,9 @@ static void perf_event_comm_output(struct perf_event *event,
 
 static int perf_event_comm_match(struct perf_event *event)
 {
+	if (event->state < PERF_EVENT_STATE_INACTIVE)
+		return 0;
+
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
 		return 0;
 
@@ -3494,6 +3499,9 @@ static void perf_event_mmap_output(struct perf_event *event,
 static int perf_event_mmap_match(struct perf_event *event,
 				 struct perf_mmap_event *mmap_event)
 {
+	if (event->state < PERF_EVENT_STATE_INACTIVE)
+		return 0;
+
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
 		return 0;
 
@@ -4571,7 +4579,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 	if (attr->type >= PERF_TYPE_MAX)
 		return -EINVAL;
 
-	if (attr->__reserved_1 || attr->__reserved_2)
+	if (attr->__reserved_1)
 		return -EINVAL;
 
 	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
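Only the __reserved_1 bits are rejected now; the separate __reserved_2 test is dropped, presumably because that field was removed or repurposed in struct perf_event_attr (the header change is outside this diff). From userspace the practical rule is unchanged: zero the whole attr so any reserved bits stay clear. An illustrative sketch, not taken from this diff:

	#include <string.h>
	#include <linux/perf_event.h>

	/* A zeroed perf_event_attr keeps all reserved bits clear, so the
	 * kernel's __reserved_1 check in perf_copy_attr() passes. */
	static void setup_attr(struct perf_event_attr *attr)
	{
		memset(attr, 0, sizeof(*attr));
		attr->size     = sizeof(*attr);
		attr->type     = PERF_TYPE_HARDWARE;
		attr->config   = PERF_COUNT_HW_CPU_CYCLES;
		attr->disabled = 1;
	}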
@@ -5148,7 +5156,7 @@ int perf_event_init_task(struct task_struct *child)
 					    GFP_KERNEL);
 			if (!child_ctx) {
 				ret = -ENOMEM;
-				goto exit;
+				break;
 			}
 
 			__perf_event_init_context(child_ctx, child);
@@ -5164,7 +5172,7 @@ int perf_event_init_task(struct task_struct *child)
 		}
 	}
 
-	if (inherited_all) {
+	if (child_ctx && inherited_all) {
 		/*
 		 * Mark the child context as a clone of the parent
 		 * context, or of whatever the parent is a clone of.
@@ -5184,7 +5192,6 @@ int perf_event_init_task(struct task_struct *child)
 		get_ctx(child_ctx->parent_ctx);
 	}
 
-exit:
 	mutex_unlock(&parent_ctx->mutex);
 
 	perf_unpin_context(parent_ctx);
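Taken together, the last three hunks restructure the error handling in perf_event_init_task(): an allocation failure now breaks out of the inheritance loop instead of jumping over it, the clone-marking block only runs when a child context actually exists, and with no remaining jump target the exit: label disappears; the unlock/unpin tail is reached by plain fall-through. A control-flow sketch assembled from these hunks, not the literal function body:

	mutex_lock(&parent_ctx->mutex);

	list_for_each_entry(event, &parent_ctx->group_list, group_entry) {
		/* allocate and initialize child_ctx on first use (elided) */
		if (!child_ctx) {
			ret = -ENOMEM;
			break;			/* was: goto exit */
		}
		/* inherit the event into child_ctx (elided) */
	}

	if (child_ctx && inherited_all) {	/* was: if (inherited_all) */
		/* mark child_ctx as a clone of the parent context */
	}

	/* no "exit:" label any more; both paths fall through to here */
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);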