Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--  kernel/perf_event.c  171
1 file changed, 93 insertions(+), 78 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index e73e53c7582f..d27746bd3a06 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -203,14 +203,14 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
 		 * if so. If we locked the right context, then it
 		 * can't get swapped on us any more.
 		 */
-		spin_lock_irqsave(&ctx->lock, *flags);
+		raw_spin_lock_irqsave(&ctx->lock, *flags);
 		if (ctx != rcu_dereference(task->perf_event_ctxp)) {
-			spin_unlock_irqrestore(&ctx->lock, *flags);
+			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
 			goto retry;
 		}
 
 		if (!atomic_inc_not_zero(&ctx->refcount)) {
-			spin_unlock_irqrestore(&ctx->lock, *flags);
+			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
 			ctx = NULL;
 		}
 	}
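The hunk above shows the conversion pattern that repeats through the rest of this patch: ctx->lock is now a raw_spinlock_t, so every acquisition and release site moves from the spin_* API to the raw_spin_* API. A minimal sketch of that usage on a hypothetical structure (demo_ctx and demo_ctx_touch are illustrative names, not part of this file; the lock would be set up with raw_spin_lock_init() before first use):

#include <linux/spinlock.h>

/* Hypothetical context mirroring the ctx->lock conversion: a raw_spinlock_t
 * remains a busy-waiting spinlock even on configurations where ordinary
 * spinlocks can become sleeping locks (e.g. PREEMPT_RT).
 */
struct demo_ctx {
	raw_spinlock_t lock;
	int nr_events;
};

static void demo_ctx_touch(struct demo_ctx *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);	/* IRQ-safe acquire */
	ctx->nr_events++;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}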
@@ -231,7 +231,7 @@ static struct perf_event_context *perf_pin_task_context(struct task_struct *task
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
 		++ctx->pin_count;
-		spin_unlock_irqrestore(&ctx->lock, flags);
+		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 	return ctx;
 }
@@ -240,9 +240,9 @@ static void perf_unpin_context(struct perf_event_context *ctx)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	raw_spin_lock_irqsave(&ctx->lock, flags);
 	--ctx->pin_count;
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	put_ctx(ctx);
 }
 
@@ -427,7 +427,7 @@ static void __perf_event_remove_from_context(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	/*
 	 * Protect the list operation against NMI by disabling the
 	 * events on a global level.
@@ -449,7 +449,7 @@ static void __perf_event_remove_from_context(void *info)
 	}
 
 	perf_enable();
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 
@@ -488,12 +488,12 @@ retry:
 	task_oncpu_function_call(task, __perf_event_remove_from_context,
 				 event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	/*
 	 * If the context is active we need to retry the smp call.
 	 */
 	if (ctx->nr_active && !list_empty(&event->group_entry)) {
-		spin_unlock_irq(&ctx->lock);
+		raw_spin_unlock_irq(&ctx->lock);
 		goto retry;
 	}
 
@@ -504,7 +504,7 @@ retry:
 	 */
 	if (!list_empty(&event->group_entry))
 		list_del_event(event, ctx);
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 /*
@@ -535,7 +535,7 @@ static void __perf_event_disable(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 
 	/*
 	 * If the event is on, turn it off.
@@ -551,7 +551,7 @@ static void __perf_event_disable(void *info)
 		event->state = PERF_EVENT_STATE_OFF;
 	}
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -584,12 +584,12 @@ void perf_event_disable(struct perf_event *event)
  retry:
 	task_oncpu_function_call(task, __perf_event_disable, event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	/*
 	 * If the event is still active, we need to retry the cross-call.
 	 */
 	if (event->state == PERF_EVENT_STATE_ACTIVE) {
-		spin_unlock_irq(&ctx->lock);
+		raw_spin_unlock_irq(&ctx->lock);
 		goto retry;
 	}
 
@@ -602,7 +602,7 @@ void perf_event_disable(struct perf_event *event)
 		event->state = PERF_EVENT_STATE_OFF;
 	}
 
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 static int
@@ -770,7 +770,7 @@ static void __perf_install_in_context(void *info)
 		cpuctx->task_ctx = ctx;
 	}
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
@@ -782,6 +782,9 @@ static void __perf_install_in_context(void *info)
 
 	add_event_to_ctx(event, ctx);
 
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		goto unlock;
+
 	/*
 	 * Don't put the event on if it is disabled or if
 	 * it is in a group and the group isn't on.
@@ -820,7 +823,7 @@ static void __perf_install_in_context(void *info)
  unlock:
 	perf_enable();
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -856,12 +859,12 @@ retry:
 	task_oncpu_function_call(task, __perf_install_in_context,
 				 event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	/*
 	 * we need to retry the smp call.
 	 */
 	if (ctx->is_active && list_empty(&event->group_entry)) {
-		spin_unlock_irq(&ctx->lock);
+		raw_spin_unlock_irq(&ctx->lock);
 		goto retry;
 	}
 
@@ -872,7 +875,7 @@ retry:
 	 */
 	if (list_empty(&event->group_entry))
 		add_event_to_ctx(event, ctx);
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 /*
@@ -917,7 +920,7 @@ static void __perf_event_enable(void *info)
 		cpuctx->task_ctx = ctx;
 	}
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
@@ -925,6 +928,9 @@ static void __perf_event_enable(void *info)
 		goto unlock;
 	__perf_event_mark_enabled(event, ctx);
 
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		goto unlock;
+
 	/*
 	 * If the event is in a group and isn't the group leader,
 	 * then don't put it on unless the group is on.
@@ -959,7 +965,7 @@ static void __perf_event_enable(void *info)
 	}
 
  unlock:
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -985,7 +991,7 @@ void perf_event_enable(struct perf_event *event)
 		return;
 	}
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
 		goto out;
 
@@ -1000,10 +1006,10 @@ void perf_event_enable(struct perf_event *event)
 		event->state = PERF_EVENT_STATE_OFF;
 
  retry:
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 	task_oncpu_function_call(task, __perf_event_enable, event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 
 	/*
 	 * If the context is active and the event is still off,
@@ -1020,7 +1026,7 @@ void perf_event_enable(struct perf_event *event)
 		__perf_event_mark_enabled(event, ctx);
 
  out:
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 static int perf_event_refresh(struct perf_event *event, int refresh)
@@ -1042,7 +1048,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
 {
 	struct perf_event *event;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 0;
 	if (likely(!ctx->nr_events))
 		goto out;
@@ -1055,7 +1061,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
 	}
 	perf_enable();
  out:
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -1193,8 +1199,8 @@ void perf_event_task_sched_out(struct task_struct *task,
 		 * order we take the locks because no other cpu could
 		 * be trying to lock both of these tasks.
 		 */
-		spin_lock(&ctx->lock);
-		spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
+		raw_spin_lock(&ctx->lock);
+		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
 		if (context_equiv(ctx, next_ctx)) {
 			/*
 			 * XXX do we need a memory barrier of sorts
@@ -1208,8 +1214,8 @@ void perf_event_task_sched_out(struct task_struct *task,
 
 			perf_event_sync_stat(ctx, next_ctx);
 		}
-		spin_unlock(&next_ctx->lock);
-		spin_unlock(&ctx->lock);
+		raw_spin_unlock(&next_ctx->lock);
+		raw_spin_unlock(&ctx->lock);
 	}
 	rcu_read_unlock();
 
@@ -1251,7 +1257,7 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 	struct perf_event *event;
 	int can_add_hw = 1;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	if (likely(!ctx->nr_events))
 		goto out;
@@ -1306,7 +1312,7 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 	}
 	perf_enable();
  out:
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -1370,11 +1376,14 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 	struct hw_perf_event *hwc;
 	u64 interrupts, freq;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
 
+		if (event->cpu != -1 && event->cpu != smp_processor_id())
+			continue;
+
 		hwc = &event->hw;
 
 		interrupts = hwc->interrupts;
@@ -1425,7 +1434,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 			perf_enable();
 		}
 	}
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -1438,7 +1447,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
 	if (!ctx->nr_events)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	/*
 	 * Rotate the first entry last (works just fine for group events too):
 	 */
@@ -1449,7 +1458,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
 	}
 	perf_enable();
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 void perf_event_task_tick(struct task_struct *curr, int cpu)
@@ -1498,7 +1507,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
 
 	__perf_event_task_sched_out(ctx);
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 
 	list_for_each_entry(event, &ctx->group_list, group_entry) {
 		if (!event->attr.enable_on_exec)
@@ -1516,7 +1525,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
 	if (enabled)
 		unclone_ctx(ctx);
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 
 	perf_event_task_sched_in(task, smp_processor_id());
  out:
@@ -1542,10 +1551,10 @@ static void __perf_event_read(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	update_context_time(ctx);
 	update_event_times(event);
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 
 	event->pmu->read(event);
 }
@@ -1563,10 +1572,10 @@ static u64 perf_event_read(struct perf_event *event)
 		struct perf_event_context *ctx = event->ctx;
 		unsigned long flags;
 
-		spin_lock_irqsave(&ctx->lock, flags);
+		raw_spin_lock_irqsave(&ctx->lock, flags);
 		update_context_time(ctx);
 		update_event_times(event);
-		spin_unlock_irqrestore(&ctx->lock, flags);
+		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
 	return atomic64_read(&event->count);
@@ -1579,7 +1588,7 @@ static void
 __perf_event_init_context(struct perf_event_context *ctx,
 			  struct task_struct *task)
 {
-	spin_lock_init(&ctx->lock);
+	raw_spin_lock_init(&ctx->lock);
 	mutex_init(&ctx->mutex);
 	INIT_LIST_HEAD(&ctx->group_list);
 	INIT_LIST_HEAD(&ctx->event_list);
@@ -1595,15 +1604,12 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
 	unsigned long flags;
 	int err;
 
-	/*
-	 * If cpu is not a wildcard then this is a percpu event:
-	 */
-	if (cpu != -1) {
+	if (pid == -1 && cpu != -1) {
 		/* Must be root to operate on a CPU event: */
 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 			return ERR_PTR(-EACCES);
 
-		if (cpu < 0 || cpu > num_possible_cpus())
+		if (cpu < 0 || cpu >= nr_cpumask_bits)
 			return ERR_PTR(-EINVAL);
 
 		/*
@@ -1611,7 +1617,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
 		 * offline CPU and activate it when the CPU comes up, but
 		 * that's for later.
 		 */
-		if (!cpu_isset(cpu, cpu_online_map))
+		if (!cpu_online(cpu))
 			return ERR_PTR(-ENODEV);
 
 		cpuctx = &per_cpu(perf_cpu_context, cpu);
@@ -1649,7 +1655,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
 		unclone_ctx(ctx);
-		spin_unlock_irqrestore(&ctx->lock, flags);
+		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
 	if (!ctx) {
@@ -1987,7 +1993,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	if (!value)
 		return -EINVAL;
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	if (event->attr.freq) {
 		if (value > sysctl_perf_event_sample_rate) {
 			ret = -EINVAL;
@@ -2000,7 +2006,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		event->hw.sample_period = value;
 	}
 unlock:
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 
 	return ret;
 }
@@ -3262,6 +3268,12 @@ static void perf_event_task_output(struct perf_event *event,
 
 static int perf_event_task_match(struct perf_event *event)
 {
+	if (event->state != PERF_EVENT_STATE_ACTIVE)
+		return 0;
+
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		return 0;
+
 	if (event->attr.comm || event->attr.mmap || event->attr.task)
 		return 1;
 
@@ -3287,12 +3299,11 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 	rcu_read_lock();
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_task_ctx(&cpuctx->ctx, task_event);
-	put_cpu_var(perf_cpu_context);
-
 	if (!ctx)
 		ctx = rcu_dereference(task_event->task->perf_event_ctxp);
 	if (ctx)
 		perf_event_task_ctx(ctx, task_event);
+	put_cpu_var(perf_cpu_context);
 	rcu_read_unlock();
 }
 
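In perf_event_task_event() above (and in the comm and mmap emitters further down), put_cpu_var() now comes after the task-context walk, so the preemption-disabled region opened by get_cpu_var() covers both context walks, presumably so the event->cpu checks added to the match helpers see a stable smp_processor_id(). A rough sketch of that get_cpu_var()/put_cpu_var() pairing on a hypothetical per-CPU counter, not taken from this file:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_hits);	/* hypothetical per-CPU data */

static void demo_account(void)
{
	/* get_cpu_var() disables preemption and returns this CPU's copy;
	 * every use of the reference (and any smp_processor_id() check)
	 * must sit between it and the matching put_cpu_var().
	 */
	unsigned long *hits = &get_cpu_var(demo_hits);

	(*hits)++;
	put_cpu_var(demo_hits);
}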
@@ -3369,6 +3380,12 @@ static void perf_event_comm_output(struct perf_event *event,
 
 static int perf_event_comm_match(struct perf_event *event)
 {
+	if (event->state != PERF_EVENT_STATE_ACTIVE)
+		return 0;
+
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		return 0;
+
 	if (event->attr.comm)
 		return 1;
 
@@ -3405,15 +3422,10 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 	rcu_read_lock();
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_comm_ctx(&cpuctx->ctx, comm_event);
-	put_cpu_var(perf_cpu_context);
-
-	/*
-	 * doesn't really matter which of the child contexts the
-	 * events ends up in.
-	 */
 	ctx = rcu_dereference(current->perf_event_ctxp);
 	if (ctx)
 		perf_event_comm_ctx(ctx, comm_event);
+	put_cpu_var(perf_cpu_context);
 	rcu_read_unlock();
 }
 
@@ -3488,6 +3500,12 @@ static void perf_event_mmap_output(struct perf_event *event,
 static int perf_event_mmap_match(struct perf_event *event,
 				 struct perf_mmap_event *mmap_event)
 {
+	if (event->state != PERF_EVENT_STATE_ACTIVE)
+		return 0;
+
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		return 0;
+
 	if (event->attr.mmap)
 		return 1;
 
@@ -3561,15 +3579,10 @@ got_name:
 	rcu_read_lock();
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
-	put_cpu_var(perf_cpu_context);
-
-	/*
-	 * doesn't really matter which of the child contexts the
-	 * events ends up in.
-	 */
 	ctx = rcu_dereference(current->perf_event_ctxp);
 	if (ctx)
 		perf_event_mmap_ctx(ctx, mmap_event);
+	put_cpu_var(perf_cpu_context);
 	rcu_read_unlock();
 
 	kfree(buf);
@@ -3860,6 +3873,9 @@ static int perf_swevent_match(struct perf_event *event,
 				struct perf_sample_data *data,
 				struct pt_regs *regs)
 {
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		return 0;
+
 	if (!perf_swevent_is_counting(event))
 		return 0;
 
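Several hunks in this patch add the same guard to the various *_match() helpers: an event bound to a specific CPU (event->cpu != -1) only matches on that CPU, while event->cpu == -1 means any CPU. A standalone sketch of that predicate (demo_event_cpu_match is an illustrative name, not part of the patch), assuming it is called with preemption disabled so smp_processor_id() is stable:

#include <linux/perf_event.h>
#include <linux/smp.h>

static int demo_event_cpu_match(struct perf_event *event)
{
	/* -1 acts as a wildcard; otherwise only the bound CPU matches. */
	return event->cpu == -1 || event->cpu == smp_processor_id();
}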
@@ -4564,7 +4580,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 	if (attr->type >= PERF_TYPE_MAX)
 		return -EINVAL;
 
-	if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
+	if (attr->__reserved_1 || attr->__reserved_2)
 		return -EINVAL;
 
 	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
@@ -4717,7 +4733,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	if (IS_ERR(event))
 		goto err_put_context;
 
-	err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0);
+	err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR);
 	if (err < 0)
 		goto err_free_put_context;
 
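The hunk above opens the event file descriptor O_RDWR rather than with a zero flags argument. For reference, a minimal user-space sketch that obtains and reads such a counter through perf_event_open() (ordinary syscall usage, not part of this patch; assumes kernel headers that define __NR_perf_event_open, and error handling is trimmed):

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;

	/* pid == 0: calling task, cpu == -1: any CPU, no group fd, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... the workload being measured runs here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}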
@@ -4992,7 +5008,7 @@ void perf_event_exit_task(struct task_struct *child)
 	 * reading child->perf_event_ctxp, we wait until it has
 	 * incremented the context's refcount before we do put_ctx below.
 	 */
-	spin_lock(&child_ctx->lock);
+	raw_spin_lock(&child_ctx->lock);
 	child->perf_event_ctxp = NULL;
 	/*
 	 * If this context is a clone; unclone it so it can't get
@@ -5001,7 +5017,7 @@ void perf_event_exit_task(struct task_struct *child)
 	 */
 	unclone_ctx(child_ctx);
 	update_context_time(child_ctx);
-	spin_unlock_irqrestore(&child_ctx->lock, flags);
+	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
 
 	/*
 	 * Report the task dead after unscheduling the events so that we
@@ -5141,7 +5157,7 @@ int perf_event_init_task(struct task_struct *child)
 						   GFP_KERNEL);
 			if (!child_ctx) {
 				ret = -ENOMEM;
-				goto exit;
+				break;
 			}
 
 			__perf_event_init_context(child_ctx, child);
@@ -5157,7 +5173,7 @@ int perf_event_init_task(struct task_struct *child)
 		}
 	}
 
-	if (inherited_all) {
+	if (child_ctx && inherited_all) {
 		/*
 		 * Mark the child context as a clone of the parent
 		 * context, or of whatever the parent is a clone of.
@@ -5177,7 +5193,6 @@ int perf_event_init_task(struct task_struct *child)
 		get_ctx(child_ctx->parent_ctx);
 	}
 
-exit:
 	mutex_unlock(&parent_ctx->mutex);
 
 	perf_unpin_context(parent_ctx);
@@ -5292,11 +5307,11 @@ perf_set_reserve_percpu(struct sysdev_class *class,
 	perf_reserved_percpu = val;
 	for_each_online_cpu(cpu) {
 		cpuctx = &per_cpu(perf_cpu_context, cpu);
-		spin_lock_irq(&cpuctx->ctx.lock);
+		raw_spin_lock_irq(&cpuctx->ctx.lock);
 		mpt = min(perf_max_events - cpuctx->ctx.nr_events,
 			  perf_max_events - perf_reserved_percpu);
 		cpuctx->max_pertask = mpt;
-		spin_unlock_irq(&cpuctx->ctx.lock);
+		raw_spin_unlock_irq(&cpuctx->ctx.lock);
 	}
 	spin_unlock(&perf_resource_lock);
 