author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-03-19 15:26:19 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-04-06 03:30:17 -0400
commit    0322cd6ec504b0bf08ca7b2c3d7f43bda37d79c9 (patch)
tree      8e5487e1a6700eb865ec1f268d51e9d6d3d38a71
parent    b8e83514b64577b48bfb794fe85fcde40a9343ca (diff)
perf_counter: unify irq output code

Impact: cleanup

Having 3 slightly different copies of the same code around does nobody
any good. First step in revamping the output format.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Orig-LKML-Reference: <20090319194233.929962222@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
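The net effect is that the powerpc overflow handler, the x86 NMI handler,
and the software-counter paths all funnel into a single entry point. A
minimal caller-side sketch, assembled from the hunks below (nmi flags NMI
context, regs is the interrupted register state):

	/* shared entry point, declared in include/linux/perf_counter.h */
	extern void perf_counter_output(struct perf_counter *counter,
					int nmi, struct pt_regs *regs);

	/* powerpc record_and_restart(): hard-irq context */
	if (record)
		perf_counter_output(counter, 1, regs);

	/* x86 NMI handler: pass the nmi flag through */
	perf_save_and_restart(counter);
	perf_counter_output(counter, nmi, regs);

	/* software counter overflow */
	perf_swcounter_update(counter);
	perf_swcounter_set_period(counter);
	perf_counter_output(counter, nmi, regs);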
-rw-r--r--  arch/powerpc/kernel/perf_counter.c  |  51
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c  |  53
-rw-r--r--  include/linux/perf_counter.h        |   2
-rw-r--r--  kernel/perf_counter.c               | 106
4 files changed, 61 insertions(+), 151 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 26f69dc7130e..88b72eb4af12 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -663,41 +663,6 @@ void perf_counter_do_pending(void)
 }
 
 /*
- * Record data for an irq counter.
- * This function was lifted from the x86 code; maybe it should
- * go in the core?
- */
-static void perf_store_irq_data(struct perf_counter *counter, u64 data)
-{
-	struct perf_data *irqdata = counter->irqdata;
-
-	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
-		irqdata->overrun++;
-	} else {
-		u64 *p = (u64 *) &irqdata->data[irqdata->len];
-
-		*p = data;
-		irqdata->len += sizeof(u64);
-	}
-}
-
-/*
- * Record all the values of the counters in a group
- */
-static void perf_handle_group(struct perf_counter *counter)
-{
-	struct perf_counter *leader, *sub;
-
-	leader = counter->group_leader;
-	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
-		if (sub != counter)
-			sub->hw_ops->read(sub);
-		perf_store_irq_data(counter, sub->hw_event.event_config);
-		perf_store_irq_data(counter, atomic64_read(&sub->count));
-	}
-}
-
-/*
  * A counter has overflowed; update its count and record
  * things if requested. Note that interrupts are hard-disabled
  * here so there is no possibility of being interrupted.
@@ -736,20 +701,8 @@ static void record_and_restart(struct perf_counter *counter, long val,
 	/*
 	 * Finally record data if requested.
 	 */
-	if (record) {
-		switch (counter->hw_event.record_type) {
-		case PERF_RECORD_SIMPLE:
-			break;
-		case PERF_RECORD_IRQ:
-			perf_store_irq_data(counter, instruction_pointer(regs));
-			counter->wakeup_pending = 1;
-			break;
-		case PERF_RECORD_GROUP:
-			perf_handle_group(counter);
-			counter->wakeup_pending = 1;
-			break;
-		}
-	}
+	if (record)
+		perf_counter_output(counter, 1, regs);
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index d844ae41d5a3..902282d68b0c 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -674,20 +674,6 @@ static void pmc_generic_disable(struct perf_counter *counter)
 	x86_perf_counter_update(counter, hwc, idx);
 }
 
-static void perf_store_irq_data(struct perf_counter *counter, u64 data)
-{
-	struct perf_data *irqdata = counter->irqdata;
-
-	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
-		irqdata->overrun++;
-	} else {
-		u64 *p = (u64 *) &irqdata->data[irqdata->len];
-
-		*p = data;
-		irqdata->len += sizeof(u64);
-	}
-}
-
 /*
  * Save and restart an expired counter. Called by NMI contexts,
  * so it has to be careful about preempting normal counter ops:
@@ -704,22 +690,6 @@ static void perf_save_and_restart(struct perf_counter *counter)
 	__pmc_generic_enable(counter, hwc, idx);
 }
 
-static void
-perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
-{
-	struct perf_counter *counter, *group_leader = sibling->group_leader;
-
-	/*
-	 * Store sibling timestamps (if any):
-	 */
-	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
-
-		x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
-		perf_store_irq_data(sibling, counter->hw_event.event_config);
-		perf_store_irq_data(sibling, atomic64_read(&counter->count));
-	}
-}
-
 /*
  * Maximum interrupt frequency of 100KHz per CPU
  */
@@ -754,28 +724,7 @@ again:
 			continue;
 
 		perf_save_and_restart(counter);
-
-		switch (counter->hw_event.record_type) {
-		case PERF_RECORD_SIMPLE:
-			continue;
-		case PERF_RECORD_IRQ:
-			perf_store_irq_data(counter, instruction_pointer(regs));
-			break;
-		case PERF_RECORD_GROUP:
-			perf_handle_group(counter, &status, &ack);
-			break;
-		}
-		/*
-		 * From NMI context we cannot call into the scheduler to
-		 * do a task wakeup - but we mark these generic as
-		 * wakeup_pending and initate a wakeup callback:
-		 */
-		if (nmi) {
-			counter->wakeup_pending = 1;
-			set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
-		} else {
-			wake_up(&counter->waitq);
-		}
+		perf_counter_output(counter, nmi, regs);
 	}
 
 	hw_perf_ack_status(ack);
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 8f9394905502..a4b76c0175f3 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -317,6 +317,8 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
 			       struct perf_cpu_context *cpuctx,
 			       struct perf_counter_context *ctx, int cpu);
 
+extern void perf_counter_output(struct perf_counter *counter,
+				int nmi, struct pt_regs *regs);
 /*
  * Return 1 for a software counter, 0 for a hardware counter
  */
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 68a56a68bc74..f054b8c9bf96 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1354,6 +1354,60 @@ static const struct file_operations perf_fops = {
 };
 
 /*
+ * Output
+ */
+
+static void perf_counter_store_irq(struct perf_counter *counter, u64 data)
+{
+	struct perf_data *irqdata = counter->irqdata;
+
+	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
+		irqdata->overrun++;
+	} else {
+		u64 *p = (u64 *) &irqdata->data[irqdata->len];
+
+		*p = data;
+		irqdata->len += sizeof(u64);
+	}
+}
+
+static void perf_counter_handle_group(struct perf_counter *counter)
+{
+	struct perf_counter *leader, *sub;
+
+	leader = counter->group_leader;
+	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+		if (sub != counter)
+			sub->hw_ops->read(sub);
+		perf_counter_store_irq(counter, sub->hw_event.event_config);
+		perf_counter_store_irq(counter, atomic64_read(&sub->count));
+	}
+}
+
+void perf_counter_output(struct perf_counter *counter,
+			 int nmi, struct pt_regs *regs)
+{
+	switch (counter->hw_event.record_type) {
+	case PERF_RECORD_SIMPLE:
+		return;
+
+	case PERF_RECORD_IRQ:
+		perf_counter_store_irq(counter, instruction_pointer(regs));
+		break;
+
+	case PERF_RECORD_GROUP:
+		perf_counter_handle_group(counter);
+		break;
+	}
+
+	if (nmi) {
+		counter->wakeup_pending = 1;
+		set_perf_counter_pending();
+	} else
+		wake_up(&counter->waitq);
+}
+
+/*
  * Generic software counter infrastructure
  */
 
@@ -1395,54 +1449,6 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
 	atomic64_set(&hwc->count, -left);
 }
 
-static void perf_swcounter_store_irq(struct perf_counter *counter, u64 data)
-{
-	struct perf_data *irqdata = counter->irqdata;
-
-	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
-		irqdata->overrun++;
-	} else {
-		u64 *p = (u64 *) &irqdata->data[irqdata->len];
-
-		*p = data;
-		irqdata->len += sizeof(u64);
-	}
-}
-
-static void perf_swcounter_handle_group(struct perf_counter *sibling)
-{
-	struct perf_counter *counter, *group_leader = sibling->group_leader;
-
-	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
-		counter->hw_ops->read(counter);
-		perf_swcounter_store_irq(sibling, counter->hw_event.event_config);
-		perf_swcounter_store_irq(sibling, atomic64_read(&counter->count));
-	}
-}
-
-static void perf_swcounter_interrupt(struct perf_counter *counter,
-				     int nmi, struct pt_regs *regs)
-{
-	switch (counter->hw_event.record_type) {
-	case PERF_RECORD_SIMPLE:
-		break;
-
-	case PERF_RECORD_IRQ:
-		perf_swcounter_store_irq(counter, instruction_pointer(regs));
-		break;
-
-	case PERF_RECORD_GROUP:
-		perf_swcounter_handle_group(counter);
-		break;
-	}
-
-	if (nmi) {
-		counter->wakeup_pending = 1;
-		set_perf_counter_pending();
-	} else
-		wake_up(&counter->waitq);
-}
-
 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 {
 	struct perf_counter *counter;
@@ -1461,7 +1467,7 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 	regs = task_pt_regs(current);
 
 	if (regs)
-		perf_swcounter_interrupt(counter, 0, regs);
+		perf_counter_output(counter, 0, regs);
 
 	hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));
 
@@ -1473,7 +1479,7 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
 {
 	perf_swcounter_update(counter);
 	perf_swcounter_set_period(counter);
-	perf_swcounter_interrupt(counter, nmi, regs);
+	perf_counter_output(counter, nmi, regs);
 }
 
 static int perf_swcounter_match(struct perf_counter *counter,