author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-05-29 08:51:57 -0400
committer  Ingo Molnar <mingo@elte.hu>                2009-05-29 10:21:51 -0400
commit     665c2142a94202881a3c11cbaee6506cb10ada2d (patch)
tree       14a518bf3afe81e7300a822e9b6f55980ff89d46 /kernel/perf_counter.c
parent     efb3d17240d80e27508d238809168120fe4b93a4 (diff)
perf_counter: Clean up task_ctx vs interrupts
Remove the local_irq_save() etc. in routines that are smp function
calls, or have IRQs disabled by other means.

Then change the COMM, MMAP, and swcounter context iteration to
current->perf_counter_ctxp and RCU, since it really doesn't matter
which context they iterate, they're all folded.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
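The first half of the change relies on a guarantee of the smp-function-call
machinery: a handler passed to smp_call_function_single() runs on the target
CPU in IPI context, with IRQs already disabled. A minimal sketch of the
pattern being cleaned up (illustrative only, not part of the patch; the
demo_* names are made up):

#include <linux/smp.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Before: defensive IRQ handling, although IRQs are already off here. */
static void demo_handler_before(void *info)
{
        unsigned long flags;

        local_irq_save(flags);          /* redundant in IPI context */
        spin_lock(&demo_lock);
        /* ... poke at per-cpu state ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}

/* After: rely on the smp-call guarantee; plain lock/unlock suffices. */
static void demo_handler_after(void *info)
{
        spin_lock(&demo_lock);
        /* ... poke at per-cpu state ... */
        spin_unlock(&demo_lock);
}

/* Runs demo_handler_after() on @cpu, waiting for it to complete. */
static void demo_run_on(int cpu)
{
        smp_call_function_single(cpu, demo_handler_after, NULL, 1);
}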
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--    kernel/perf_counter.c    82
1 file changed, 50 insertions(+), 32 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 58d6d198faa2..0c000d305e0e 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -232,18 +232,14 @@ static void __perf_counter_remove_from_context(void *info)
         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
         struct perf_counter *counter = info;
         struct perf_counter_context *ctx = counter->ctx;
-        unsigned long flags;
 
-        local_irq_save(flags);
         /*
          * If this is a task context, we need to check whether it is
          * the current task context of this cpu. If not it has been
          * scheduled out before the smp call arrived.
          */
-        if (ctx->task && cpuctx->task_ctx != ctx) {
-                local_irq_restore(flags);
+        if (ctx->task && cpuctx->task_ctx != ctx)
                 return;
-        }
 
         spin_lock(&ctx->lock);
         /*
@@ -267,7 +263,7 @@ static void __perf_counter_remove_from_context(void *info)
         }
 
         perf_enable();
-        spin_unlock_irqrestore(&ctx->lock, flags);
+        spin_unlock(&ctx->lock);
 }
 
 
@@ -383,17 +379,13 @@ static void __perf_counter_disable(void *info)
         struct perf_counter *counter = info;
         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
         struct perf_counter_context *ctx = counter->ctx;
-        unsigned long flags;
 
-        local_irq_save(flags);
         /*
          * If this is a per-task counter, need to check whether this
          * counter's task is the current task on this cpu.
          */
-        if (ctx->task && cpuctx->task_ctx != ctx) {
-                local_irq_restore(flags);
+        if (ctx->task && cpuctx->task_ctx != ctx)
                 return;
-        }
 
         spin_lock(&ctx->lock);
 
@@ -411,7 +403,7 @@ static void __perf_counter_disable(void *info)
                 counter->state = PERF_COUNTER_STATE_OFF;
         }
 
-        spin_unlock_irqrestore(&ctx->lock, flags);
+        spin_unlock(&ctx->lock);
 }
 
 /*
@@ -618,10 +610,8 @@ static void __perf_install_in_context(void *info)
         struct perf_counter_context *ctx = counter->ctx;
         struct perf_counter *leader = counter->group_leader;
         int cpu = smp_processor_id();
-        unsigned long flags;
         int err;
 
-        local_irq_save(flags);
         /*
          * If this is a task context, we need to check whether it is
          * the current task context of this cpu. If not it has been
@@ -630,10 +620,8 @@ static void __perf_install_in_context(void *info)
          * on this cpu because it had no counters.
          */
         if (ctx->task && cpuctx->task_ctx != ctx) {
-                if (cpuctx->task_ctx || ctx->task != current) {
-                        local_irq_restore(flags);
+                if (cpuctx->task_ctx || ctx->task != current)
                         return;
-                }
                 cpuctx->task_ctx = ctx;
         }
 
@@ -687,7 +675,7 @@ static void __perf_install_in_context(void *info)
 unlock:
         perf_enable();
 
-        spin_unlock_irqrestore(&ctx->lock, flags);
+        spin_unlock(&ctx->lock);
 }
 
 /*
@@ -751,19 +739,15 @@ static void __perf_counter_enable(void *info)
         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
         struct perf_counter_context *ctx = counter->ctx;
         struct perf_counter *leader = counter->group_leader;
-        unsigned long flags;
         int err;
 
-        local_irq_save(flags);
         /*
          * If this is a per-task counter, need to check whether this
          * counter's task is the current task on this cpu.
          */
         if (ctx->task && cpuctx->task_ctx != ctx) {
-                if (cpuctx->task_ctx || ctx->task != current) {
-                        local_irq_restore(flags);
+                if (cpuctx->task_ctx || ctx->task != current)
                         return;
-                }
                 cpuctx->task_ctx = ctx;
         }
 
@@ -811,7 +795,7 @@ static void __perf_counter_enable(void *info)
         }
 
 unlock:
-        spin_unlock_irqrestore(&ctx->lock, flags);
+        spin_unlock(&ctx->lock);
 }
 
 /*
@@ -981,6 +965,10 @@ void perf_counter_task_sched_out(struct task_struct *task,
                 spin_lock(&ctx->lock);
                 spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
                 if (context_equiv(ctx, next_ctx)) {
+                        /*
+                         * XXX do we need a memory barrier of sorts
+                         * wrt to rcu_dereference() of perf_counter_ctxp
+                         */
                         task->perf_counter_ctxp = next_ctx;
                         next->perf_counter_ctxp = ctx;
                         ctx->task = next;
@@ -998,6 +986,9 @@ void perf_counter_task_sched_out(struct task_struct *task,
         }
 }
 
+/*
+ * Called with IRQs disabled
+ */
 static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
 {
         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
@@ -1012,6 +1003,9 @@ static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
         cpuctx->task_ctx = NULL;
 }
 
+/*
+ * Called with IRQs disabled
+ */
 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
 {
         __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
@@ -2431,6 +2425,7 @@ static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
 static void perf_counter_comm_event(struct perf_comm_event *comm_event)
 {
         struct perf_cpu_context *cpuctx;
+        struct perf_counter_context *ctx;
         unsigned int size;
         char *comm = comm_event->task->comm;
 
@@ -2443,9 +2438,17 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
 
         cpuctx = &get_cpu_var(perf_cpu_context);
         perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
-        if (cpuctx->task_ctx)
-                perf_counter_comm_ctx(cpuctx->task_ctx, comm_event);
         put_cpu_var(perf_cpu_context);
+
+        rcu_read_lock();
+        /*
+         * doesn't really matter which of the child contexts the
+         * events ends up in.
+         */
+        ctx = rcu_dereference(current->perf_counter_ctxp);
+        if (ctx)
+                perf_counter_comm_ctx(ctx, comm_event);
+        rcu_read_unlock();
 }
 
 void perf_counter_comm(struct task_struct *task)
@@ -2536,6 +2539,7 @@ static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
 static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
 {
         struct perf_cpu_context *cpuctx;
+        struct perf_counter_context *ctx;
         struct file *file = mmap_event->file;
         unsigned int size;
         char tmp[16];
@@ -2568,10 +2572,18 @@ got_name:
 
         cpuctx = &get_cpu_var(perf_cpu_context);
         perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
-        if (cpuctx->task_ctx)
-                perf_counter_mmap_ctx(cpuctx->task_ctx, mmap_event);
         put_cpu_var(perf_cpu_context);
 
+        rcu_read_lock();
+        /*
+         * doesn't really matter which of the child contexts the
+         * events ends up in.
+         */
+        ctx = rcu_dereference(current->perf_counter_ctxp);
+        if (ctx)
+                perf_counter_mmap_ctx(ctx, mmap_event);
+        rcu_read_unlock();
+
         kfree(buf);
 }
 
@@ -2882,6 +2894,7 @@ static void __perf_swcounter_event(enum perf_event_types type, u32 event,
 {
         struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
         int *recursion = perf_swcounter_recursion_context(cpuctx);
+        struct perf_counter_context *ctx;
 
         if (*recursion)
                 goto out;
@@ -2891,10 +2904,15 @@ static void __perf_swcounter_event(enum perf_event_types type, u32 event,
 
         perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
                                  nr, nmi, regs, addr);
-        if (cpuctx->task_ctx) {
-                perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
-                                         nr, nmi, regs, addr);
-        }
+        rcu_read_lock();
+        /*
+         * doesn't really matter which of the child contexts the
+         * events ends up in.
+         */
+        ctx = rcu_dereference(current->perf_counter_ctxp);
+        if (ctx)
+                perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr);
+        rcu_read_unlock();
 
         barrier();
         (*recursion)--;
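
For reference, the second half of the change depends only on the RCU
read-side pattern sketched below (illustrative, not from the patch; the
demo_* names and demo_task_ctx are stand-ins for current->perf_counter_ctxp).
The scheduler may swap a task's context with an equivalent one at any
moment, so the readers take no lock; RCU only guarantees that whichever
context they observe stays valid until rcu_read_unlock(), and per the
commit message it does not matter which of the folded contexts the event
lands in.

#include <linux/rcupdate.h>

struct demo_ctx {
        /* ... counter lists, lock, refcount ... */
};

static struct demo_ctx *demo_task_ctx; /* updated via rcu_assign_pointer() */

static void demo_iterate(struct demo_ctx *ctx)
{
        /* ... walk the context's counters and emit the event ... */
}

static void demo_event(void)
{
        struct demo_ctx *ctx;

        rcu_read_lock();
        ctx = rcu_dereference(demo_task_ctx);   /* may be NULL */
        if (ctx)
                demo_iterate(ctx);
        rcu_read_unlock();
}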