Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	65
1 file changed, 40 insertions(+), 25 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9052d6c8c9fd..d27746bd3a06 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -782,6 +782,9 @@ static void __perf_install_in_context(void *info)
 
 	add_event_to_ctx(event, ctx);
 
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		goto unlock;
+
 	/*
 	 * Don't put the event on if it is disabled or if
 	 * it is in a group and the group isn't on.
@@ -925,6 +928,9 @@ static void __perf_event_enable(void *info)
 		goto unlock;
 	__perf_event_mark_enabled(event, ctx);
 
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		goto unlock;
+
 	/*
 	 * If the event is in a group and isn't the group leader,
 	 * then don't put it on unless the group is on.
@@ -1375,6 +1381,9 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
 
+		if (event->cpu != -1 && event->cpu != smp_processor_id())
+			continue;
+
 		hwc = &event->hw;
 
 		interrupts = hwc->interrupts;
@@ -1595,15 +1604,12 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
 	unsigned long flags;
 	int err;
 
-	/*
-	 * If cpu is not a wildcard then this is a percpu event:
-	 */
-	if (cpu != -1) {
+	if (pid == -1 && cpu != -1) {
 		/* Must be root to operate on a CPU event: */
 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 			return ERR_PTR(-EACCES);
 
-		if (cpu < 0 || cpu > num_possible_cpus())
+		if (cpu < 0 || cpu >= nr_cpumask_bits)
 			return ERR_PTR(-EINVAL);
 
 		/*
@@ -1611,7 +1617,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
 		 * offline CPU and activate it when the CPU comes up, but
 		 * that's for later.
 		 */
-		if (!cpu_isset(cpu, cpu_online_map))
+		if (!cpu_online(cpu))
 			return ERR_PTR(-ENODEV);
 
 		cpuctx = &per_cpu(perf_cpu_context, cpu);
@@ -3262,6 +3268,12 @@ static void perf_event_task_output(struct perf_event *event,
 
 static int perf_event_task_match(struct perf_event *event)
 {
+	if (event->state != PERF_EVENT_STATE_ACTIVE)
+		return 0;
+
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		return 0;
+
 	if (event->attr.comm || event->attr.mmap || event->attr.task)
 		return 1;
 
@@ -3287,12 +3299,11 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 	rcu_read_lock();
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_task_ctx(&cpuctx->ctx, task_event);
-	put_cpu_var(perf_cpu_context);
-
 	if (!ctx)
 		ctx = rcu_dereference(task_event->task->perf_event_ctxp);
 	if (ctx)
 		perf_event_task_ctx(ctx, task_event);
+	put_cpu_var(perf_cpu_context);
 	rcu_read_unlock();
 }
 
@@ -3369,6 +3380,12 @@ static void perf_event_comm_output(struct perf_event *event,
 
 static int perf_event_comm_match(struct perf_event *event)
 {
+	if (event->state != PERF_EVENT_STATE_ACTIVE)
+		return 0;
+
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		return 0;
+
 	if (event->attr.comm)
 		return 1;
 
@@ -3405,15 +3422,10 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 	rcu_read_lock();
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_comm_ctx(&cpuctx->ctx, comm_event);
-	put_cpu_var(perf_cpu_context);
-
-	/*
-	 * doesn't really matter which of the child contexts the
-	 * events ends up in.
-	 */
 	ctx = rcu_dereference(current->perf_event_ctxp);
 	if (ctx)
 		perf_event_comm_ctx(ctx, comm_event);
+	put_cpu_var(perf_cpu_context);
 	rcu_read_unlock();
 }
 
@@ -3488,6 +3500,12 @@ static void perf_event_mmap_output(struct perf_event *event,
 static int perf_event_mmap_match(struct perf_event *event,
 				 struct perf_mmap_event *mmap_event)
 {
+	if (event->state != PERF_EVENT_STATE_ACTIVE)
+		return 0;
+
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		return 0;
+
 	if (event->attr.mmap)
 		return 1;
 
@@ -3561,15 +3579,10 @@ got_name:
 	rcu_read_lock();
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
-	put_cpu_var(perf_cpu_context);
-
-	/*
-	 * doesn't really matter which of the child contexts the
-	 * events ends up in.
-	 */
 	ctx = rcu_dereference(current->perf_event_ctxp);
 	if (ctx)
 		perf_event_mmap_ctx(ctx, mmap_event);
+	put_cpu_var(perf_cpu_context);
 	rcu_read_unlock();
 
 	kfree(buf);
@@ -3860,6 +3873,9 @@ static int perf_swevent_match(struct perf_event *event,
 				struct perf_sample_data *data,
 				struct pt_regs *regs)
 {
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		return 0;
+
 	if (!perf_swevent_is_counting(event))
 		return 0;
 
@@ -4564,7 +4580,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 	if (attr->type >= PERF_TYPE_MAX)
 		return -EINVAL;
 
-	if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
+	if (attr->__reserved_1 || attr->__reserved_2)
 		return -EINVAL;
 
 	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
@@ -4717,7 +4733,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	if (IS_ERR(event))
 		goto err_put_context;
 
-	err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0);
+	err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR);
 	if (err < 0)
 		goto err_free_put_context;
 
@@ -5141,7 +5157,7 @@ int perf_event_init_task(struct task_struct *child)
 					    GFP_KERNEL);
 			if (!child_ctx) {
 				ret = -ENOMEM;
-				goto exit;
+				break;
 			}
 
 			__perf_event_init_context(child_ctx, child);
@@ -5157,7 +5173,7 @@ int perf_event_init_task(struct task_struct *child)
 		}
 	}
 
-	if (inherited_all) {
+	if (child_ctx && inherited_all) {
 		/*
 		 * Mark the child context as a clone of the parent
 		 * context, or of whatever the parent is a clone of.
@@ -5177,7 +5193,6 @@ int perf_event_init_task(struct task_struct *child)
 		get_ctx(child_ctx->parent_ctx);
 	}
 
-exit:
 	mutex_unlock(&parent_ctx->mutex);
 
 	perf_unpin_context(parent_ctx);
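
Note on the recurring hunk: every side-band match helper above gains the same two-step filter, first rejecting non-active events, then rejecting events whose ->cpu field names a CPU other than the one currently executing (-1 means "any CPU"). As a rough userspace sketch of that filter only, not kernel code: struct sb_event and sb_event_match() are invented names, and sched_getcpu() stands in for the kernel's smp_processor_id().

/*
 * Minimal userspace illustration of the filter this commit adds to
 * perf_event_task_match(), perf_event_comm_match(),
 * perf_event_mmap_match() and perf_swevent_match().
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

struct sb_event {
	int cpu;	/* -1 == any CPU, otherwise bound to one CPU */
	int active;	/* analogue of PERF_EVENT_STATE_ACTIVE */
};

static int sb_event_match(const struct sb_event *event)
{
	if (!event->active)
		return 0;

	/* The check added throughout this commit: a CPU-bound event
	 * only matches on the CPU it is bound to. */
	if (event->cpu != -1 && event->cpu != sched_getcpu())
		return 0;

	return 1;
}

int main(void)
{
	struct sb_event any    = { .cpu = -1, .active = 1 };
	/* Deliberately bound to a CPU we are not running on. */
	struct sb_event pinned = { .cpu = sched_getcpu() + 1, .active = 1 };

	printf("wildcard event matches:  %d\n", sb_event_match(&any));
	printf("other-cpu event matches: %d\n", sb_event_match(&pinned));
	return 0;
}

The effect in the kernel is that a per-task-per-cpu event (pid != -1, cpu != -1) sees comm/mmap/task side-band records only from its own CPU instead of once per CPU.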