author		Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 12:02:01 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 12:02:01 -0500
commit		8f0ddf91f2aeb09602373e400cf8b403e9017210
tree		b907c35c79caadafff6ad46a91614e30afd2f967	/kernel/perf_event.c
parent		050cbb09dac0402672edeaeac06094ef8ff1749a
parent		b5f91da0a6973bb6f9ff3b91b0e92c0773a458f3
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (26 commits)
  clockevents: Convert to raw_spinlock
  clockevents: Make tick_device_lock static
  debugobjects: Convert to raw_spinlocks
  perf_event: Convert to raw_spinlock
  hrtimers: Convert to raw_spinlocks
  genirq: Convert irq_desc.lock to raw_spinlock
  smp: Convert smplocks to raw_spinlocks
  rtmutes: Convert rtmutex.lock to raw_spinlock
  sched: Convert pi_lock to raw_spinlock
  sched: Convert cpupri lock to raw_spinlock
  sched: Convert rt_runtime_lock to raw_spinlock
  sched: Convert rq->lock to raw_spinlock
  plist: Make plist debugging raw_spinlock aware
  bkl: Fixup core_lock fallout
  locking: Cleanup the name space completely
  locking: Further name space cleanups
  alpha: Fix fallout from locking changes
  locking: Implement new raw_spinlock
  locking: Convert raw_rwlock functions to arch_rwlock
  locking: Convert raw_rwlock to arch_rwlock
  ...
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	106
1 file changed, 53 insertions(+), 53 deletions(-)
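
Every hunk in this diff applies the same mechanical rename: ctx->lock changes type from spinlock_t to raw_spinlock_t, and each lock/unlock call on it gains a raw_ prefix. The raw_ variants keep genuine busy-wait semantics even on PREEMPT_RT, where a plain spinlock_t becomes a sleeping lock; a context lock taken from NMI-sensitive and scheduler paths must never sleep. A minimal before/after sketch of the pattern, using a hypothetical struct demo_ctx in place of the real perf_event_context:

#include <linux/spinlock.h>

/* Hypothetical stand-in for perf_event_context. */
struct demo_ctx {
	raw_spinlock_t lock;	/* was: spinlock_t lock; */
	int nr_events;
};

static void demo_ctx_init(struct demo_ctx *ctx)
{
	/* was: spin_lock_init(&ctx->lock); */
	raw_spin_lock_init(&ctx->lock);
	ctx->nr_events = 0;
}

/* Safe to call with interrupts enabled; irq state is saved and restored. */
static void demo_ctx_add_event(struct demo_ctx *ctx)
{
	unsigned long flags;

	/* was: spin_lock_irqsave(&ctx->lock, flags); */
	raw_spin_lock_irqsave(&ctx->lock, flags);
	ctx->nr_events++;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

The calling conventions are identical, which is why the diffstat balances exactly: 53 lines removed, 53 added.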
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index e73e53c7582f..9052d6c8c9fd 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -203,14 +203,14 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
 		 * if so. If we locked the right context, then it
 		 * can't get swapped on us any more.
 		 */
-		spin_lock_irqsave(&ctx->lock, *flags);
+		raw_spin_lock_irqsave(&ctx->lock, *flags);
 		if (ctx != rcu_dereference(task->perf_event_ctxp)) {
-			spin_unlock_irqrestore(&ctx->lock, *flags);
+			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
 			goto retry;
 		}
 
 		if (!atomic_inc_not_zero(&ctx->refcount)) {
-			spin_unlock_irqrestore(&ctx->lock, *flags);
+			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
 			ctx = NULL;
 		}
 	}
@@ -231,7 +231,7 @@ static struct perf_event_context *perf_pin_task_context(struct task_struct *task
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
 		++ctx->pin_count;
-		spin_unlock_irqrestore(&ctx->lock, flags);
+		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 	return ctx;
 }
@@ -240,9 +240,9 @@ static void perf_unpin_context(struct perf_event_context *ctx)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	raw_spin_lock_irqsave(&ctx->lock, flags);
 	--ctx->pin_count;
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	put_ctx(ctx);
 }
 
@@ -427,7 +427,7 @@ static void __perf_event_remove_from_context(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	/*
 	 * Protect the list operation against NMI by disabling the
 	 * events on a global level.
@@ -449,7 +449,7 @@ static void __perf_event_remove_from_context(void *info)
 	}
 
 	perf_enable();
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 
@@ -488,12 +488,12 @@ retry:
 	task_oncpu_function_call(task, __perf_event_remove_from_context,
 				 event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	/*
 	 * If the context is active we need to retry the smp call.
 	 */
 	if (ctx->nr_active && !list_empty(&event->group_entry)) {
-		spin_unlock_irq(&ctx->lock);
+		raw_spin_unlock_irq(&ctx->lock);
 		goto retry;
 	}
 
@@ -504,7 +504,7 @@ retry:
 	 */
 	if (!list_empty(&event->group_entry))
 		list_del_event(event, ctx);
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 /*
@@ -535,7 +535,7 @@ static void __perf_event_disable(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 
 	/*
 	 * If the event is on, turn it off.
@@ -551,7 +551,7 @@ static void __perf_event_disable(void *info)
 		event->state = PERF_EVENT_STATE_OFF;
 	}
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -584,12 +584,12 @@ void perf_event_disable(struct perf_event *event)
  retry:
 	task_oncpu_function_call(task, __perf_event_disable, event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	/*
 	 * If the event is still active, we need to retry the cross-call.
 	 */
 	if (event->state == PERF_EVENT_STATE_ACTIVE) {
-		spin_unlock_irq(&ctx->lock);
+		raw_spin_unlock_irq(&ctx->lock);
 		goto retry;
 	}
 
@@ -602,7 +602,7 @@ void perf_event_disable(struct perf_event *event)
 		event->state = PERF_EVENT_STATE_OFF;
 	}
 
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 static int
@@ -770,7 +770,7 @@ static void __perf_install_in_context(void *info)
 		cpuctx->task_ctx = ctx;
 	}
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
@@ -820,7 +820,7 @@ static void __perf_install_in_context(void *info)
  unlock:
 	perf_enable();
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -856,12 +856,12 @@ retry:
 	task_oncpu_function_call(task, __perf_install_in_context,
 				 event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	/*
 	 * we need to retry the smp call.
 	 */
 	if (ctx->is_active && list_empty(&event->group_entry)) {
-		spin_unlock_irq(&ctx->lock);
+		raw_spin_unlock_irq(&ctx->lock);
 		goto retry;
 	}
 
@@ -872,7 +872,7 @@ retry:
 	 */
 	if (list_empty(&event->group_entry))
 		add_event_to_ctx(event, ctx);
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 /*
@@ -917,7 +917,7 @@ static void __perf_event_enable(void *info)
 		cpuctx->task_ctx = ctx;
 	}
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
@@ -959,7 +959,7 @@ static void __perf_event_enable(void *info)
 	}
 
  unlock:
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -985,7 +985,7 @@ void perf_event_enable(struct perf_event *event)
 		return;
 	}
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
 		goto out;
 
@@ -1000,10 +1000,10 @@ void perf_event_enable(struct perf_event *event)
 		event->state = PERF_EVENT_STATE_OFF;
 
  retry:
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 	task_oncpu_function_call(task, __perf_event_enable, event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 
 	/*
 	 * If the context is active and the event is still off,
@@ -1020,7 +1020,7 @@ void perf_event_enable(struct perf_event *event)
 		__perf_event_mark_enabled(event, ctx);
 
  out:
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 static int perf_event_refresh(struct perf_event *event, int refresh)
@@ -1042,7 +1042,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
 {
 	struct perf_event *event;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 0;
 	if (likely(!ctx->nr_events))
 		goto out;
@@ -1055,7 +1055,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
 	}
 	perf_enable();
  out:
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -1193,8 +1193,8 @@ void perf_event_task_sched_out(struct task_struct *task,
 		 * order we take the locks because no other cpu could
 		 * be trying to lock both of these tasks.
 		 */
-		spin_lock(&ctx->lock);
-		spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
+		raw_spin_lock(&ctx->lock);
+		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
 		if (context_equiv(ctx, next_ctx)) {
 			/*
 			 * XXX do we need a memory barrier of sorts
@@ -1208,8 +1208,8 @@ void perf_event_task_sched_out(struct task_struct *task,
 
 			perf_event_sync_stat(ctx, next_ctx);
 		}
-		spin_unlock(&next_ctx->lock);
-		spin_unlock(&ctx->lock);
+		raw_spin_unlock(&next_ctx->lock);
+		raw_spin_unlock(&ctx->lock);
 	}
 	rcu_read_unlock();
 
@@ -1251,7 +1251,7 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 	struct perf_event *event;
 	int can_add_hw = 1;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	if (likely(!ctx->nr_events))
 		goto out;
@@ -1306,7 +1306,7 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 	}
 	perf_enable();
  out:
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -1370,7 +1370,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 	struct hw_perf_event *hwc;
 	u64 interrupts, freq;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
@@ -1425,7 +1425,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 			perf_enable();
 		}
 	}
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -1438,7 +1438,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
 	if (!ctx->nr_events)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	/*
 	 * Rotate the first entry last (works just fine for group events too):
 	 */
@@ -1449,7 +1449,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
 	}
 	perf_enable();
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 void perf_event_task_tick(struct task_struct *curr, int cpu)
@@ -1498,7 +1498,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
 
 	__perf_event_task_sched_out(ctx);
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 
 	list_for_each_entry(event, &ctx->group_list, group_entry) {
 		if (!event->attr.enable_on_exec)
@@ -1516,7 +1516,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
 	if (enabled)
 		unclone_ctx(ctx);
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 
 	perf_event_task_sched_in(task, smp_processor_id());
  out:
@@ -1542,10 +1542,10 @@ static void __perf_event_read(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	update_context_time(ctx);
 	update_event_times(event);
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 
 	event->pmu->read(event);
 }
@@ -1563,10 +1563,10 @@ static u64 perf_event_read(struct perf_event *event)
 		struct perf_event_context *ctx = event->ctx;
 		unsigned long flags;
 
-		spin_lock_irqsave(&ctx->lock, flags);
+		raw_spin_lock_irqsave(&ctx->lock, flags);
 		update_context_time(ctx);
 		update_event_times(event);
-		spin_unlock_irqrestore(&ctx->lock, flags);
+		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
 	return atomic64_read(&event->count);
@@ -1579,7 +1579,7 @@ static void
 __perf_event_init_context(struct perf_event_context *ctx,
 			  struct task_struct *task)
 {
-	spin_lock_init(&ctx->lock);
+	raw_spin_lock_init(&ctx->lock);
 	mutex_init(&ctx->mutex);
 	INIT_LIST_HEAD(&ctx->group_list);
 	INIT_LIST_HEAD(&ctx->event_list);
@@ -1649,7 +1649,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
 		unclone_ctx(ctx);
-		spin_unlock_irqrestore(&ctx->lock, flags);
+		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
 	if (!ctx) {
@@ -1987,7 +1987,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	if (!value)
 		return -EINVAL;
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	if (event->attr.freq) {
 		if (value > sysctl_perf_event_sample_rate) {
 			ret = -EINVAL;
@@ -2000,7 +2000,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		event->hw.sample_period = value;
 	}
 unlock:
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 
 	return ret;
 }
@@ -4992,7 +4992,7 @@ void perf_event_exit_task(struct task_struct *child)
 	 * reading child->perf_event_ctxp, we wait until it has
 	 * incremented the context's refcount before we do put_ctx below.
 	 */
-	spin_lock(&child_ctx->lock);
+	raw_spin_lock(&child_ctx->lock);
 	child->perf_event_ctxp = NULL;
 	/*
 	 * If this context is a clone; unclone it so it can't get
@@ -5001,7 +5001,7 @@ void perf_event_exit_task(struct task_struct *child)
 	 */
 	unclone_ctx(child_ctx);
 	update_context_time(child_ctx);
-	spin_unlock_irqrestore(&child_ctx->lock, flags);
+	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
 
 	/*
 	 * Report the task dead after unscheduling the events so that we
@@ -5292,11 +5292,11 @@ perf_set_reserve_percpu(struct sysdev_class *class,
 	perf_reserved_percpu = val;
 	for_each_online_cpu(cpu) {
 		cpuctx = &per_cpu(perf_cpu_context, cpu);
-		spin_lock_irq(&cpuctx->ctx.lock);
+		raw_spin_lock_irq(&cpuctx->ctx.lock);
 		mpt = min(perf_max_events - cpuctx->ctx.nr_events,
 			  perf_max_events - perf_reserved_percpu);
 		cpuctx->max_pertask = mpt;
-		spin_unlock_irq(&cpuctx->ctx.lock);
+		raw_spin_unlock_irq(&cpuctx->ctx.lock);
 	}
 	spin_unlock(&perf_resource_lock);
 
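
Two details in the hunks above are worth noting. First, perf_resource_lock in the last hunk stays a plain spinlock: only ctx->lock needs raw semantics, since perf_resource_lock is taken only from ordinary process context (the sysfs handler shown here). Second, perf_event_task_sched_out acquires two context locks of the same lock class back to back, so the second acquisition uses raw_spin_lock_nested() with SINGLE_DEPTH_NESTING to tell lockdep the nesting is deliberate. A sketch of that annotation on a hypothetical pair of demo contexts, safe only under the same assumption the kernel makes there: no other CPU can be trying to take these two locks in the opposite order.

#include <linux/spinlock.h>

struct demo_ctx {
	raw_spinlock_t lock;
	int data;
};

/* Swap payloads while holding both context locks. */
static void demo_swap(struct demo_ctx *a, struct demo_ctx *b)
{
	int tmp;

	raw_spin_lock(&a->lock);
	/*
	 * Both locks belong to the same lock class, so a second plain
	 * raw_spin_lock() would trip lockdep's recursion check;
	 * SINGLE_DEPTH_NESTING marks this as intentional, bounded nesting.
	 */
	raw_spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);

	tmp = a->data;
	a->data = b->data;
	b->data = tmp;

	raw_spin_unlock(&b->lock);
	raw_spin_unlock(&a->lock);
}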