author     Ingo Molnar <mingo@elte.hu>    2008-12-14 06:28:33 -0500
committer  Ingo Molnar <mingo@elte.hu>    2008-12-14 14:31:23 -0500
commit     5d6a27d8a096868ae313f71f563b06074a7e34fe
tree       0155a8bedea29a6e3eaaa827d1dd2934f68f695b
parent     8cb391e8786c8072367f0aeb90551903fef074ba
perfcounters: add context switch counter
Impact: add new feature, new sw counter
Add a counter that counts the number of context switches a task
performs.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  include/linux/perf_counter.h |  4
-rw-r--r--  kernel/perf_counter.c        | 51
2 files changed, 53 insertions(+), 2 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index e5d25bf8f74e..d2a16563415f 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -53,8 +53,8 @@ enum hw_event_types {
         /*
          * Future software events:
          */
-        /* PERF_COUNT_PAGE_FAULTS       = -3,
-           PERF_COUNT_CONTEXT_SWITCHES  = -4, */
+        PERF_COUNT_PAGE_FAULTS          = -3,
+        PERF_COUNT_CONTEXT_SWITCHES     = -4,
 };
 
 /*
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 1f81cde0dc43..09287091c526 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -888,6 +888,54 @@ static const struct hw_perf_counter_ops perf_ops_task_clock = {
         .hw_perf_counter_read           = task_clock_perf_counter_read,
 };
 
+static u64 get_context_switches(void)
+{
+        struct task_struct *curr = current;
+
+        return curr->nvcsw + curr->nivcsw;
+}
+
+static void context_switches_perf_counter_update(struct perf_counter *counter)
+{
+        u64 prev, now;
+        s64 delta;
+
+        prev = atomic64_read(&counter->hw.prev_count);
+        now = get_context_switches();
+
+        atomic64_set(&counter->hw.prev_count, now);
+
+        delta = now - prev;
+        if (WARN_ON_ONCE(delta < 0))
+                delta = 0;
+
+        atomic64_add(delta, &counter->count);
+}
+
+static void context_switches_perf_counter_read(struct perf_counter *counter)
+{
+        context_switches_perf_counter_update(counter);
+}
+
+static void context_switches_perf_counter_enable(struct perf_counter *counter)
+{
+        /*
+         * ->nvcsw + curr->nivcsw is a per-task value already,
+         * so we dont have to clear it on switch-in.
+         */
+}
+
+static void context_switches_perf_counter_disable(struct perf_counter *counter)
+{
+        context_switches_perf_counter_update(counter);
+}
+
+static const struct hw_perf_counter_ops perf_ops_context_switches = {
+        .hw_perf_counter_enable         = context_switches_perf_counter_enable,
+        .hw_perf_counter_disable        = context_switches_perf_counter_disable,
+        .hw_perf_counter_read           = context_switches_perf_counter_read,
+};
+
 static const struct hw_perf_counter_ops *
 sw_perf_counter_init(struct perf_counter *counter)
 {
@@ -900,6 +948,9 @@ sw_perf_counter_init(struct perf_counter *counter)
         case PERF_COUNT_TASK_CLOCK:
                 hw_ops = &perf_ops_task_clock;
                 break;
+        case PERF_COUNT_CONTEXT_SWITCHES:
+                hw_ops = &perf_ops_context_switches;
+                break;
         default:
                 break;
         }
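
For readers who want the update logic in isolation: the new counter follows a plain snapshot-and-delta pattern. Each update reads the raw per-task value (nvcsw + nivcsw), remembers it as the new snapshot, and adds only the difference since the previous snapshot to the accumulated count. Below is a minimal, self-contained user-space C sketch of that pattern; the names (counter_state, read_raw_source, counter_update) are illustrative only and are not part of the kernel patch.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-counter state, mirroring hw.prev_count / count in the patch. */
struct counter_state {
        uint64_t prev_count;    /* last snapshot of the raw source value */
        uint64_t count;         /* accumulated counter value */
};

/* Stand-in for current->nvcsw + current->nivcsw: any monotonically
 * increasing raw source works with this pattern. */
static uint64_t read_raw_source(void)
{
        static uint64_t fake;
        return fake += 3;       /* pretend the task switched a few more times */
}

/* Fold the raw source into the counter: read the current value, store it
 * as the new snapshot, and accumulate only the increase since last time. */
static void counter_update(struct counter_state *c)
{
        uint64_t now = read_raw_source();
        int64_t delta = (int64_t)(now - c->prev_count);

        c->prev_count = now;
        if (delta < 0)          /* defensive, like the WARN_ON_ONCE() in the patch */
                delta = 0;
        c->count += (uint64_t)delta;
}

int main(void)
{
        struct counter_state c = { .prev_count = read_raw_source(), .count = 0 };

        counter_update(&c);     /* e.g. on read */
        counter_update(&c);     /* e.g. on disable */
        printf("context switches observed: %llu\n",
               (unsigned long long)c.count);
        return 0;
}

Accumulating deltas rather than copying the raw value keeps the reported count monotonic across repeated reads, which is what the read and disable callbacks in the patch rely on.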