Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--  kernel/perf_event.c  110
1 file changed, 55 insertions, 55 deletions
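
The hunks below apply one mechanical change: every operation on the perf_event_context lock moves from the spin_lock_*() family to the raw_spin_lock_*() family (and spin_lock_init() to raw_spin_lock_init()), while unrelated locks such as perf_resource_lock in the final hunk are left alone. As a minimal sketch of that pattern, assuming a made-up struct example_ctx as a stand-in for the real context structure (whose field declaration lives in a header and is not part of this file's diff):

#include <linux/spinlock.h>

/* Hypothetical stand-in for struct perf_event_context; illustration only. */
struct example_ctx {
	raw_spinlock_t	lock;		/* was: spinlock_t lock; */
	int		nr_events;
};

static void example_ctx_init(struct example_ctx *ctx)
{
	raw_spin_lock_init(&ctx->lock);	/* was: spin_lock_init(&ctx->lock); */
}

static void example_ctx_touch(struct example_ctx *ctx)
{
	unsigned long flags;

	/*
	 * A raw spinlock always busy-waits; unlike spinlock_t it is never
	 * converted into a sleeping lock on PREEMPT_RT kernels.
	 */
	raw_spin_lock_irqsave(&ctx->lock, flags);	/* was: spin_lock_irqsave() */
	ctx->nr_events++;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);	/* was: spin_unlock_irqrestore() */
}
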
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 2e0aaa34fc7e..8ab86988bd24 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -203,14 +203,14 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
 		 * if so. If we locked the right context, then it
 		 * can't get swapped on us any more.
 		 */
-		spin_lock_irqsave(&ctx->lock, *flags);
+		raw_spin_lock_irqsave(&ctx->lock, *flags);
 		if (ctx != rcu_dereference(task->perf_event_ctxp)) {
-			spin_unlock_irqrestore(&ctx->lock, *flags);
+			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
 			goto retry;
 		}
 
 		if (!atomic_inc_not_zero(&ctx->refcount)) {
-			spin_unlock_irqrestore(&ctx->lock, *flags);
+			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
 			ctx = NULL;
 		}
 	}
@@ -231,7 +231,7 @@ static struct perf_event_context *perf_pin_task_context(struct task_struct *task
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
 		++ctx->pin_count;
-		spin_unlock_irqrestore(&ctx->lock, flags);
+		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 	return ctx;
 }
@@ -240,9 +240,9 @@ static void perf_unpin_context(struct perf_event_context *ctx)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	raw_spin_lock_irqsave(&ctx->lock, flags);
 	--ctx->pin_count;
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	put_ctx(ctx);
 }
 
@@ -427,7 +427,7 @@ static void __perf_event_remove_from_context(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	/*
 	 * Protect the list operation against NMI by disabling the
 	 * events on a global level.
@@ -449,7 +449,7 @@ static void __perf_event_remove_from_context(void *info)
 	}
 
 	perf_enable();
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 
@@ -476,7 +476,7 @@ static void perf_event_remove_from_context(struct perf_event *event)
 	if (!task) {
 		/*
 		 * Per cpu events are removed via an smp call and
-		 * the removal is always sucessful.
+		 * the removal is always successful.
 		 */
 		smp_call_function_single(event->cpu,
					 __perf_event_remove_from_context,
@@ -488,12 +488,12 @@ retry:
 	task_oncpu_function_call(task, __perf_event_remove_from_context,
				 event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	/*
 	 * If the context is active we need to retry the smp call.
 	 */
 	if (ctx->nr_active && !list_empty(&event->group_entry)) {
-		spin_unlock_irq(&ctx->lock);
+		raw_spin_unlock_irq(&ctx->lock);
 		goto retry;
 	}
 
@@ -504,7 +504,7 @@ retry:
 	 */
 	if (!list_empty(&event->group_entry))
 		list_del_event(event, ctx);
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 /*
@@ -535,7 +535,7 @@ static void __perf_event_disable(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 
 	/*
 	 * If the event is on, turn it off.
@@ -551,7 +551,7 @@ static void __perf_event_disable(void *info)
 		event->state = PERF_EVENT_STATE_OFF;
 	}
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -584,12 +584,12 @@ void perf_event_disable(struct perf_event *event)
  retry:
 	task_oncpu_function_call(task, __perf_event_disable, event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	/*
 	 * If the event is still active, we need to retry the cross-call.
 	 */
 	if (event->state == PERF_EVENT_STATE_ACTIVE) {
-		spin_unlock_irq(&ctx->lock);
+		raw_spin_unlock_irq(&ctx->lock);
 		goto retry;
 	}
 
@@ -602,7 +602,7 @@ void perf_event_disable(struct perf_event *event)
 		event->state = PERF_EVENT_STATE_OFF;
 	}
 
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 static int
@@ -770,7 +770,7 @@ static void __perf_install_in_context(void *info)
 		cpuctx->task_ctx = ctx;
 	}
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
@@ -823,7 +823,7 @@ static void __perf_install_in_context(void *info)
  unlock:
 	perf_enable();
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -848,7 +848,7 @@ perf_install_in_context(struct perf_event_context *ctx,
 	if (!task) {
 		/*
 		 * Per cpu events are installed via an smp call and
-		 * the install is always sucessful.
+		 * the install is always successful.
 		 */
 		smp_call_function_single(cpu, __perf_install_in_context,
					 event, 1);
@@ -859,12 +859,12 @@ retry:
 	task_oncpu_function_call(task, __perf_install_in_context,
				 event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	/*
 	 * we need to retry the smp call.
 	 */
 	if (ctx->is_active && list_empty(&event->group_entry)) {
-		spin_unlock_irq(&ctx->lock);
+		raw_spin_unlock_irq(&ctx->lock);
 		goto retry;
 	}
 
@@ -875,7 +875,7 @@ retry:
 	 */
 	if (list_empty(&event->group_entry))
 		add_event_to_ctx(event, ctx);
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 /*
@@ -920,7 +920,7 @@ static void __perf_event_enable(void *info)
 		cpuctx->task_ctx = ctx;
 	}
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
@@ -965,7 +965,7 @@ static void __perf_event_enable(void *info)
 	}
 
  unlock:
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -991,7 +991,7 @@ void perf_event_enable(struct perf_event *event)
 		return;
 	}
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
 		goto out;
 
@@ -1006,10 +1006,10 @@ void perf_event_enable(struct perf_event *event)
 		event->state = PERF_EVENT_STATE_OFF;
 
  retry:
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 	task_oncpu_function_call(task, __perf_event_enable, event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 
 	/*
 	 * If the context is active and the event is still off,
@@ -1026,7 +1026,7 @@ void perf_event_enable(struct perf_event *event)
 		__perf_event_mark_enabled(event, ctx);
 
  out:
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 static int perf_event_refresh(struct perf_event *event, int refresh)
@@ -1048,7 +1048,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
 {
 	struct perf_event *event;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 0;
 	if (likely(!ctx->nr_events))
 		goto out;
@@ -1061,7 +1061,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
 	}
 	perf_enable();
  out:
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -1199,8 +1199,8 @@ void perf_event_task_sched_out(struct task_struct *task,
 		 * order we take the locks because no other cpu could
 		 * be trying to lock both of these tasks.
 		 */
-		spin_lock(&ctx->lock);
-		spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
+		raw_spin_lock(&ctx->lock);
+		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
 		if (context_equiv(ctx, next_ctx)) {
 			/*
 			 * XXX do we need a memory barrier of sorts
@@ -1214,8 +1214,8 @@ void perf_event_task_sched_out(struct task_struct *task,
 
 			perf_event_sync_stat(ctx, next_ctx);
 		}
-		spin_unlock(&next_ctx->lock);
-		spin_unlock(&ctx->lock);
+		raw_spin_unlock(&next_ctx->lock);
+		raw_spin_unlock(&ctx->lock);
 	}
 	rcu_read_unlock();
 
@@ -1257,7 +1257,7 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 	struct perf_event *event;
 	int can_add_hw = 1;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	if (likely(!ctx->nr_events))
 		goto out;
@@ -1312,7 +1312,7 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 	}
 	perf_enable();
  out:
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -1376,7 +1376,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 	struct hw_perf_event *hwc;
 	u64 interrupts, freq;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
@@ -1431,7 +1431,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 			perf_enable();
 		}
 	}
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -1444,7 +1444,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
 	if (!ctx->nr_events)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	/*
 	 * Rotate the first entry last (works just fine for group events too):
 	 */
@@ -1455,7 +1455,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
 	}
 	perf_enable();
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 void perf_event_task_tick(struct task_struct *curr, int cpu)
@@ -1504,7 +1504,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
 
 	__perf_event_task_sched_out(ctx);
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 
 	list_for_each_entry(event, &ctx->group_list, group_entry) {
 		if (!event->attr.enable_on_exec)
@@ -1522,7 +1522,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
 	if (enabled)
 		unclone_ctx(ctx);
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 
 	perf_event_task_sched_in(task, smp_processor_id());
  out:
@@ -1548,10 +1548,10 @@ static void __perf_event_read(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	update_context_time(ctx);
 	update_event_times(event);
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 
 	event->pmu->read(event);
 }
@@ -1569,10 +1569,10 @@ static u64 perf_event_read(struct perf_event *event)
 		struct perf_event_context *ctx = event->ctx;
 		unsigned long flags;
 
-		spin_lock_irqsave(&ctx->lock, flags);
+		raw_spin_lock_irqsave(&ctx->lock, flags);
 		update_context_time(ctx);
 		update_event_times(event);
-		spin_unlock_irqrestore(&ctx->lock, flags);
+		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
 	return atomic64_read(&event->count);
@@ -1585,7 +1585,7 @@ static void
 __perf_event_init_context(struct perf_event_context *ctx,
			  struct task_struct *task)
 {
-	spin_lock_init(&ctx->lock);
+	raw_spin_lock_init(&ctx->lock);
 	mutex_init(&ctx->mutex);
 	INIT_LIST_HEAD(&ctx->group_list);
 	INIT_LIST_HEAD(&ctx->event_list);
@@ -1652,7 +1652,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
 		unclone_ctx(ctx);
-		spin_unlock_irqrestore(&ctx->lock, flags);
+		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
 	if (!ctx) {
@@ -1990,7 +1990,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	if (!value)
 		return -EINVAL;
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	if (event->attr.freq) {
 		if (value > sysctl_perf_event_sample_rate) {
 			ret = -EINVAL;
@@ -2003,7 +2003,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		event->hw.sample_period = value;
 	}
 unlock:
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 
 	return ret;
 }
@@ -4995,7 +4995,7 @@ void perf_event_exit_task(struct task_struct *child)
 	 * reading child->perf_event_ctxp, we wait until it has
 	 * incremented the context's refcount before we do put_ctx below.
 	 */
-	spin_lock(&child_ctx->lock);
+	raw_spin_lock(&child_ctx->lock);
 	child->perf_event_ctxp = NULL;
 	/*
 	 * If this context is a clone; unclone it so it can't get
@@ -5004,7 +5004,7 @@ void perf_event_exit_task(struct task_struct *child)
 	 */
 	unclone_ctx(child_ctx);
 	update_context_time(child_ctx);
-	spin_unlock_irqrestore(&child_ctx->lock, flags);
+	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
 
 	/*
 	 * Report the task dead after unscheduling the events so that we
@@ -5295,11 +5295,11 @@ perf_set_reserve_percpu(struct sysdev_class *class,
 	perf_reserved_percpu = val;
 	for_each_online_cpu(cpu) {
 		cpuctx = &per_cpu(perf_cpu_context, cpu);
-		spin_lock_irq(&cpuctx->ctx.lock);
+		raw_spin_lock_irq(&cpuctx->ctx.lock);
 		mpt = min(perf_max_events - cpuctx->ctx.nr_events,
			  perf_max_events - perf_reserved_percpu);
 		cpuctx->max_pertask = mpt;
-		spin_unlock_irq(&cpuctx->ctx.lock);
+		raw_spin_unlock_irq(&cpuctx->ctx.lock);
 	}
 	spin_unlock(&perf_resource_lock);
 