author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-06-16 08:37:10 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-09-09 14:46:30 -0400
commit		a4eaf7f14675cb512d69f0c928055e73d0c6d252 (patch)
tree		e8a0f631fc28d4bd9becd2e9e2c71743c64ee3ec /include
parent		fa407f35e0298d841e4088f95a7f9cf6e725c6d5 (diff)
perf: Rework the PMU methods
Replace pmu::{enable,disable,start,stop,unthrottle} with
pmu::{add,del,start,stop}, all of which take a flags argument.

The new interface extends the capability to stop a counter while
keeping it scheduled on the PMU. We replace the throttled state with
the generic stopped state.

This also allows us to efficiently stop/start counters over certain
code paths (like IRQ handlers).

It also allows scheduling a counter without it starting, allowing for
a generic frozen state (useful for rotating stopped counters).

The stopped state is implemented in two different ways, depending on
how the architecture implemented the throttled state:

 1) We disable the counter:
    a) the pmu has per-counter enable bits, we flip that
    b) we program a NOP event, preserving the counter state

 2) We store the counter state and ignore all read/overflow events

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
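As a rough illustration of the contract described above, a hypothetical
architecture driver might wire the four methods up as sketched below.
This is a sketch only, assuming made-up hardware accessors: the hw_*()
helpers and dummy_* names are illustrative stand-ins, not kernel APIs.

#include <linux/perf_event.h>

/* hypothetical hardware accessors, not kernel APIs */
static void hw_enable_counter(int idx);
static void hw_disable_counter(int idx);
static void hw_write_counter(int idx, u64 val);
static u64  hw_read_counter(int idx);
static int  hw_assign_counter(struct perf_event *event);
static void hw_release_counter(struct perf_event *event);

/* fold the hardware delta into event->count */
static void dummy_read(struct perf_event *event)
{
	u64 now  = hw_read_counter(event->hw.idx);
	u64 prev = local64_xchg(&event->hw.prev_count, now);

	local64_add(now - prev, &event->count);
}

static void dummy_start(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_RELOAD)	/* reprogram the stored count */
		hw_write_counter(event->hw.idx,
				 local64_read(&event->hw.prev_count));

	event->hw.state = 0;		/* counting again, count goes stale */
	hw_enable_counter(event->hw.idx);
}

static void dummy_stop(struct perf_event *event, int flags)
{
	hw_disable_counter(event->hw.idx);
	event->hw.state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) &&
	    !(event->hw.state & PERF_HES_UPTODATE)) {
		dummy_read(event);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

/* schedule the counter on the PMU, stopped unless PERF_EF_START */
static int dummy_add(struct perf_event *event, int flags)
{
	if (hw_assign_counter(event) < 0)
		return -EAGAIN;

	event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		dummy_start(event, PERF_EF_RELOAD);

	return 0;
}

static void dummy_del(struct perf_event *event, int flags)
{
	dummy_stop(event, PERF_EF_UPDATE);
	hw_release_counter(event);
}

The stopped-but-scheduled state is exactly what the changelog describes:
after dummy_add() without PERF_EF_START the counter occupies a hardware
slot but does not count until ->start() flips it on.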
Diffstat (limited to 'include')
-rw-r--r--	include/linux/ftrace_event.h	4
-rw-r--r--	include/linux/perf_event.h	54
2 files changed, 43 insertions, 15 deletions
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 5f8ad7bec636..8beabb958f61 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -252,8 +252,8 @@ DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
 
 extern int perf_trace_init(struct perf_event *event);
 extern void perf_trace_destroy(struct perf_event *event);
-extern int perf_trace_enable(struct perf_event *event);
-extern void perf_trace_disable(struct perf_event *event);
+extern int perf_trace_add(struct perf_event *event, int flags);
+extern void perf_trace_del(struct perf_event *event, int flags);
 extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
				     char *filter_str);
 extern void ftrace_profile_free_filter(struct perf_event *event);
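Since the renamed helpers now take the same (event, flags) arguments as
pmu::add/pmu::del below, a tracepoint PMU can hand them straight to the
generic layer. A sketch of such wiring, where tp_event_init, tp_start,
tp_stop and tp_read are hypothetical stand-ins rather than actual kernel
symbols:

static struct pmu perf_tracepoint = {
	.event_init	= tp_event_init,
	.add		= perf_trace_add,	/* matches int  (*add)(event, flags) */
	.del		= perf_trace_del,	/* matches void (*del)(event, flags) */
	.start		= tp_start,
	.stop		= tp_stop,
	.read		= tp_read,
};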
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8cafa15af60d..402073c61669 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -538,6 +538,7 @@ struct hw_perf_event {
 	};
 #endif
 	};
+	int				state;
 	local64_t			prev_count;
 	u64				sample_period;
 	u64				last_period;
@@ -549,6 +550,13 @@ struct hw_perf_event {
 #endif
 };
 
+/*
+ * hw_perf_event::state flags
+ */
+#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
+#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
+#define PERF_HES_ARCH		0x04
+
 struct perf_event;
 
 /*
@@ -564,42 +572,62 @@ struct pmu {
 
 	int				*pmu_disable_count;
 
+	/*
+	 * Fully disable/enable this PMU, can be used to protect from the PMI
+	 * as well as for lazy/batch writing of the MSRs.
+	 */
 	void (*pmu_enable)		(struct pmu *pmu); /* optional */
 	void (*pmu_disable)		(struct pmu *pmu); /* optional */
 
 	/*
+	 * Try and initialize the event for this PMU.
 	 * Should return -ENOENT when the @event doesn't match this PMU.
 	 */
 	int (*event_init)		(struct perf_event *event);
 
-	int  (*enable)			(struct perf_event *event);
-	void (*disable)			(struct perf_event *event);
-	int  (*start)			(struct perf_event *event);
-	void (*stop)			(struct perf_event *event);
+#define PERF_EF_START	0x01		/* start the counter when adding    */
+#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
+#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */
+
+	/*
+	 * Adds/Removes a counter to/from the PMU, can be done inside
+	 * a transaction, see the ->*_txn() methods.
+	 */
+	int  (*add)			(struct perf_event *event, int flags);
+	void (*del)			(struct perf_event *event, int flags);
+
+	/*
+	 * Starts/Stops a counter present on the PMU. The PMI handler
+	 * should stop the counter when perf_event_overflow() returns
+	 * !0. ->start() will be used to continue.
+	 */
+	void (*start)			(struct perf_event *event, int flags);
+	void (*stop)			(struct perf_event *event, int flags);
+
+	/*
+	 * Updates the counter value of the event.
+	 */
 	void (*read)			(struct perf_event *event);
-	void (*unthrottle)		(struct perf_event *event);
 
 	/*
 	 * Group events scheduling is treated as a transaction, add
 	 * group events as a whole and perform one schedulability test.
 	 * If the test fails, roll back the whole group
-	 */
-
-	/*
-	 * Start the transaction, after this ->enable() doesn't need to
+	 *
+	 * Start the transaction, after this ->add() doesn't need to
 	 * do schedulability tests.
 	 */
 	void (*start_txn)	(struct pmu *pmu); /* optional */
 	/*
-	 * If ->start_txn() disabled the ->enable() schedulability test
+	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
 	int  (*commit_txn)	(struct pmu *pmu); /* optional */
 	/*
-	 * Will cancel the transaction, assumes ->disable() is called
-	 * for each successfull ->enable() during the transaction.
+	 * Will cancel the transaction, assumes ->del() is called
+	 * for each successfull ->add() during the transaction.
	 */
 	void (*cancel_txn)	(struct pmu *pmu); /* optional */
 };
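Taken together, the transaction methods suggest a group-scheduling
pattern like the following sketch; sketch_group_sched_in() is an
illustrative stand-in under these assumptions, not the kernel's actual
group scheduling code:

#include <linux/list.h>
#include <linux/perf_event.h>

static int sketch_group_sched_in(struct perf_event *leader, struct pmu *pmu)
{
	struct perf_event *event, *partial;

	if (pmu->start_txn)			/* optional */
		pmu->start_txn(pmu);

	/* inside a transaction ->add() may skip its schedulability test */
	if (pmu->add(leader, PERF_EF_START))
		goto fail_leader;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (pmu->add(event, PERF_EF_START)) {
			partial = event;
			goto fail_siblings;
		}
	}

	/* one schedulability test for the whole group */
	if (!pmu->commit_txn || !pmu->commit_txn(pmu))
		return 0;

	partial = NULL;				/* undo every sibling */
fail_siblings:
	/* roll back: ->del() for each successful ->add() */
	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (event == partial)
			break;
		pmu->del(event, 0);
	}
	pmu->del(leader, 0);
fail_leader:
	if (pmu->cancel_txn)			/* optional */
		pmu->cancel_txn(pmu);
	return -EAGAIN;
}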
@@ -680,7 +708,7 @@ struct perf_event {
 	int				nr_siblings;
 	int				group_flags;
 	struct perf_event		*group_leader;
-	struct pmu		*pmu;
+	struct pmu			*pmu;
 
 	enum perf_event_active_state	state;
 	unsigned int			attach_state;
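The ->start()/->stop() comment in the struct pmu hunk above replaces the
old unthrottle path. A sketch of the resulting throttle pattern in a PMI
handler, assuming the 2010-era perf_event_overflow() signature (its int
nmi argument is passed as 1 here); sketch_overflow() and
sketch_unthrottle() are hypothetical:

static void sketch_overflow(struct perf_event *event,
			    struct perf_sample_data *data,
			    struct pt_regs *regs)
{
	/* !0 means the event got throttled: stop it, folding the
	 * hardware count into event->count via PERF_EF_UPDATE */
	if (perf_event_overflow(event, 1, data, regs))
		event->pmu->stop(event, PERF_EF_UPDATE);
}

static void sketch_unthrottle(struct perf_event *event)
{
	/* resume from the stored count */
	event->pmu->start(event, PERF_EF_RELOAD);
}

No separate throttled state remains: a throttled counter is simply
stopped, and unthrottling restarts it with PERF_EF_RELOAD.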