author		Ingo Molnar <mingo@elte.hu>	2009-06-06 03:58:57 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-06-06 05:37:22 -0400
commit		a21ca2cac582886a3e95c8bb84ff7c52d4d15e54
tree		d110005d81e46b1afb3204fbaacc132d0ec946ee
parent		2f335a02b3c816e77e7df1d15b12e3bbb8f4c8f0

perf_counter: Separate out attr->type from attr->config
Counter type is a frequently used value and we do a lot of
bit juggling by encoding and decoding it from attr->config.
Clean this up by creating a separate attr->type field.
Also clean up the various similarly complex user-space bits
all around counter attribute management.
The net improvement is significant, and it will be easier
to add a new major type (which is what triggered this cleanup).
(This changes the ABI, all tools are adapted.)
(PowerPC build-tested.)
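
For illustration only (this example is not part of the patch text): a tool
that used to pack the type into the config word with the old EID() helper,
e.g.

	attr.config = EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS);

now fills in the two fields separately:

	attr.type   = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_INSTRUCTIONS;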
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 Documentation/perf_counter/builtin-record.c    | 105
 Documentation/perf_counter/builtin-stat.c      |  76
 Documentation/perf_counter/builtin-top.c       |  67
 Documentation/perf_counter/perf.h              |   2
 Documentation/perf_counter/util/parse-events.c | 120
 Documentation/perf_counter/util/parse-events.h |   7
 arch/powerpc/kernel/perf_counter.c             |   6
 arch/x86/kernel/cpu/perf_counter.c             |   8
 include/linux/perf_counter.h                   |  65
 kernel/perf_counter.c                          |  14
 10 files changed, 196 insertions(+), 274 deletions(-)
diff --git a/Documentation/perf_counter/builtin-record.c b/Documentation/perf_counter/builtin-record.c
index c22ea0c7472a..130fd88266bb 100644
--- a/Documentation/perf_counter/builtin-record.c
+++ b/Documentation/perf_counter/builtin-record.c
@@ -20,10 +20,10 @@
 #define ALIGN(x, a)		__ALIGN_MASK(x, (typeof(x))(a)-1)
 #define __ALIGN_MASK(x, mask)	(((x)+(mask))&~(mask))
 
-static long default_interval = 100000;
-static long event_count[MAX_COUNTERS];
-
 static int fd[MAX_NR_CPUS][MAX_COUNTERS];
+
+static long default_interval = 100000;
+
 static int nr_cpus = 0;
 static unsigned int page_size;
 static unsigned int mmap_pages = 128;
@@ -38,22 +38,44 @@ static int inherit = 1;
 static int force = 0;
 static int append_file = 0;
 
-const unsigned int default_count[] = {
-	1000000,
-	1000000,
-	10000,
-	10000,
-	1000000,
-	10000,
+static long samples;
+static struct timeval last_read;
+static struct timeval this_read;
+
+static __u64 bytes_written;
+
+static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
+
+static int nr_poll;
+static int nr_cpu;
+
+struct mmap_event {
+	struct perf_event_header header;
+	__u32 pid;
+	__u32 tid;
+	__u64 start;
+	__u64 len;
+	__u64 pgoff;
+	char filename[PATH_MAX];
+};
+
+struct comm_event {
+	struct perf_event_header header;
+	__u32 pid;
+	__u32 tid;
+	char comm[16];
 };
 
+
 struct mmap_data {
 	int counter;
 	void *base;
 	unsigned int mask;
 	unsigned int prev;
 };
 
+static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
+
 static unsigned int mmap_read_head(struct mmap_data *md)
 {
 	struct perf_counter_mmap_page *pc = md->base;
@@ -65,11 +87,6 @@ static unsigned int mmap_read_head(struct mmap_data *md)
 	return head;
 }
 
-static long samples;
-static struct timeval last_read, this_read;
-
-static __u64 bytes_written;
-
 static void mmap_read(struct mmap_data *md)
 {
 	unsigned int head = mmap_read_head(md);
@@ -157,29 +174,6 @@ static void sig_handler(int sig)
 	done = 1;
 }
 
-static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
-static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
-
-static int nr_poll;
-static int nr_cpu;
-
-struct mmap_event {
-	struct perf_event_header header;
-	__u32 pid;
-	__u32 tid;
-	__u64 start;
-	__u64 len;
-	__u64 pgoff;
-	char filename[PATH_MAX];
-};
-
-struct comm_event {
-	struct perf_event_header header;
-	__u32 pid;
-	__u32 tid;
-	char comm[16];
-};
-
 static void pid_synthesize_comm_event(pid_t pid, int full)
 {
 	struct comm_event comm_ev;
@@ -341,24 +335,21 @@ static int group_fd;
 
 static void create_counter(int counter, int cpu, pid_t pid)
 {
-	struct perf_counter_attr attr;
+	struct perf_counter_attr *attr = attrs + counter;
 	int track = 1;
 
-	memset(&attr, 0, sizeof(attr));
-	attr.config = event_id[counter];
-	attr.sample_period = event_count[counter];
-	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD;
+	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD;
 	if (freq) {
-		attr.freq = 1;
-		attr.sample_freq = freq;
+		attr->freq = 1;
+		attr->sample_freq = freq;
 	}
-	attr.mmap = track;
-	attr.comm = track;
-	attr.inherit = (cpu < 0) && inherit;
+	attr->mmap = track;
+	attr->comm = track;
+	attr->inherit = (cpu < 0) && inherit;
 
 	track = 0; /* only the first counter needs these */
 
-	fd[nr_cpu][counter] = sys_perf_counter_open(&attr, pid, cpu, group_fd, 0);
+	fd[nr_cpu][counter] = sys_perf_counter_open(attr, pid, cpu, group_fd, 0);
 
 	if (fd[nr_cpu][counter] < 0) {
 		int err = errno;
@@ -542,16 +533,14 @@ int cmd_record(int argc, const char **argv, const char *prefix)
 	if (!argc && target_pid == -1 && !system_wide)
 		usage_with_options(record_usage, options);
 
-	if (!nr_counters) {
+	if (!nr_counters)
 		nr_counters = 1;
-		event_id[0] = 0;
-	}
 
 	for (counter = 0; counter < nr_counters; counter++) {
-		if (event_count[counter])
+		if (attrs[counter].sample_period)
 			continue;
 
-		event_count[counter] = default_interval;
+		attrs[counter].sample_period = default_interval;
 	}
 
 	return __cmd_record(argc, argv);
diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c
index 4fc0d80440e7..9711e5524233 100644
--- a/Documentation/perf_counter/builtin-stat.c
+++ b/Documentation/perf_counter/builtin-stat.c
@@ -44,23 +44,22 @@
 
 #include <sys/prctl.h>
 
-static int system_wide = 0;
-static int inherit = 1;
+static struct perf_counter_attr default_attrs[MAX_COUNTERS] = {
 
-static __u64 default_event_id[MAX_COUNTERS] = {
-	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK),
-	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES),
-	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS),
-	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS),
+  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_TASK_CLOCK },
+  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_CONTEXT_SWITCHES },
+  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_CPU_MIGRATIONS },
+  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_PAGE_FAULTS },
 
-	EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES),
-	EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS),
-	EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES),
-	EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES),
+  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CPU_CYCLES },
+  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_INSTRUCTIONS },
+  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CACHE_REFERENCES },
+  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CACHE_MISSES },
 };
 
-static int default_interval = 100000;
-static int event_count[MAX_COUNTERS];
+static int system_wide = 0;
+static int inherit = 1;
+
 static int fd[MAX_NR_CPUS][MAX_COUNTERS];
 
 static int target_pid = -1;
@@ -86,22 +85,16 @@ static __u64 walltime_nsecs;
 
 static void create_perfstat_counter(int counter)
 {
-	struct perf_counter_attr attr;
-
-	memset(&attr, 0, sizeof(attr));
-	attr.config = event_id[counter];
-	attr.sample_type = 0;
-	attr.exclude_kernel = event_mask[counter] & EVENT_MASK_KERNEL;
-	attr.exclude_user = event_mask[counter] & EVENT_MASK_USER;
+	struct perf_counter_attr *attr = attrs + counter;
 
 	if (scale)
-		attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 				   PERF_FORMAT_TOTAL_TIME_RUNNING;
 
 	if (system_wide) {
 		int cpu;
 		for (cpu = 0; cpu < nr_cpus; cpu ++) {
-			fd[cpu][counter] = sys_perf_counter_open(&attr, -1, cpu, -1, 0);
+			fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0);
 			if (fd[cpu][counter] < 0) {
 				printf("perfstat error: syscall returned with %d (%s)\n",
 				       fd[cpu][counter], strerror(errno));
@@ -109,10 +102,10 @@ static void create_perfstat_counter(int counter)
 			}
 		}
 	} else {
-		attr.inherit = inherit;
-		attr.disabled = 1;
+		attr->inherit = inherit;
+		attr->disabled = 1;
 
-		fd[0][counter] = sys_perf_counter_open(&attr, 0, -1, -1, 0);
+		fd[0][counter] = sys_perf_counter_open(attr, 0, -1, -1, 0);
 		if (fd[0][counter] < 0) {
 			printf("perfstat error: syscall returned with %d (%s)\n",
 			       fd[0][counter], strerror(errno));
@@ -126,9 +119,13 @@ static void create_perfstat_counter(int counter)
  */
 static inline int nsec_counter(int counter)
 {
-	if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK))
+	if (attrs[counter].type != PERF_TYPE_SOFTWARE)
+		return 0;
+
+	if (attrs[counter].config == PERF_COUNT_CPU_CLOCK)
 		return 1;
-	if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK))
+
+	if (attrs[counter].config == PERF_COUNT_TASK_CLOCK)
 		return 1;
 
 	return 0;
@@ -177,7 +174,8 @@ static void read_counter(int counter)
 	/*
 	 * Save the full runtime - to allow normalization during printout:
 	 */
-	if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK))
+	if (attrs[counter].type == PERF_TYPE_SOFTWARE &&
+	    attrs[counter].config == PERF_COUNT_TASK_CLOCK)
 		runtime_nsecs = count[0];
 }
 
@@ -203,8 +201,8 @@ static void print_counter(int counter)
 
 		fprintf(stderr, " %14.6f %-20s",
 			msecs, event_name(counter));
-		if (event_id[counter] ==
-		    EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK)) {
+		if (attrs[counter].type == PERF_TYPE_SOFTWARE &&
+		    attrs[counter].config == PERF_COUNT_TASK_CLOCK) {
 
 			fprintf(stderr, " # %11.3f CPU utilization factor",
 				(double)count[0] / (double)walltime_nsecs);
@@ -300,8 +298,6 @@ static char events_help_msg[EVENTS_HELP_MAX];
 static const struct option options[] = {
 	OPT_CALLBACK('e', "event", NULL, "event",
 		     events_help_msg, parse_events),
-	OPT_INTEGER('c', "count", &default_interval,
-		    "event period to sample"),
 	OPT_BOOLEAN('i', "inherit", &inherit,
 		    "child tasks inherit counters"),
 	OPT_INTEGER('p', "pid", &target_pid,
@@ -315,27 +311,19 @@ static const struct option options[] = {
 
 int cmd_stat(int argc, const char **argv, const char *prefix)
 {
-	int counter;
-
 	page_size = sysconf(_SC_PAGE_SIZE);
 
 	create_events_help(events_help_msg);
-	memcpy(event_id, default_event_id, sizeof(default_event_id));
+
+	memcpy(attrs, default_attrs, sizeof(attrs));
 
 	argc = parse_options(argc, argv, options, stat_usage, 0);
 	if (!argc)
 		usage_with_options(stat_usage, options);
 
-	if (!nr_counters) {
+	if (!nr_counters)
 		nr_counters = 8;
-	}
-
-	for (counter = 0; counter < nr_counters; counter++) {
-		if (event_count[counter])
-			continue;
 
-		event_count[counter] = default_interval;
-	}
 	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 	assert(nr_cpus <= MAX_NR_CPUS);
 	assert(nr_cpus >= 0);
diff --git a/Documentation/perf_counter/builtin-top.c b/Documentation/perf_counter/builtin-top.c
index b2f480b5a134..98a6d53e17b3 100644
--- a/Documentation/perf_counter/builtin-top.c
+++ b/Documentation/perf_counter/builtin-top.c
@@ -48,22 +48,11 @@
 #include <linux/unistd.h>
 #include <linux/types.h>
 
-static int system_wide = 0;
+static int fd[MAX_NR_CPUS][MAX_COUNTERS];
 
-static __u64 default_event_id[MAX_COUNTERS] = {
-	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK),
-	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES),
-	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS),
-	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS),
+static int system_wide = 0;
 
-	EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES),
-	EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS),
-	EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES),
-	EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES),
-};
-static int default_interval = 100000;
-static int event_count[MAX_COUNTERS];
-static int fd[MAX_NR_CPUS][MAX_COUNTERS];
+static int default_interval = 100000;
 
 static __u64 count_filter = 5;
 static int print_entries = 15;
@@ -85,15 +74,6 @@ static int delay_secs = 2;
 static int zero;
 static int dump_symtab;
 
-static const unsigned int default_count[] = {
-	1000000,
-	1000000,
-	10000,
-	10000,
-	1000000,
-	10000,
-};
-
 /*
  * Symbols
  */
@@ -112,7 +92,7 @@ struct sym_entry {
 
 struct sym_entry *sym_filter_entry;
 
-struct dso *kernel_dso;
+struct dso			*kernel_dso;
 
 /*
  * Symbols will be added here in record_ip and will get out
@@ -213,7 +193,7 @@ static void print_sym_table(void)
 	      100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)));
 
 	if (nr_counters == 1) {
-		printf("%d", event_count[0]);
+		printf("%Ld", attrs[0].sample_period);
 		if (freq)
 			printf("Hz ");
 		else
@@ -421,10 +401,10 @@ static void process_event(uint64_t ip, int counter)
 }
 
 struct mmap_data {
-	int counter;
-	void *base;
-	unsigned int mask;
-	unsigned int prev;
+	int			counter;
+	void			*base;
+	unsigned int		mask;
+	unsigned int		prev;
 };
 
 static unsigned int mmap_read_head(struct mmap_data *md)
@@ -539,7 +519,7 @@ static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
 
 static int __cmd_top(void)
 {
-	struct perf_counter_attr attr;
+	struct perf_counter_attr *attr;
 	pthread_t thread;
 	int i, counter, group_fd, nr_poll = 0;
 	unsigned int cpu;
@@ -553,13 +533,12 @@ static int __cmd_top(void)
 		if (target_pid == -1 && profile_cpu == -1)
 			cpu = i;
 
-		memset(&attr, 0, sizeof(attr));
-		attr.config = event_id[counter];
-		attr.sample_period = event_count[counter];
-		attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
-		attr.freq = freq;
+		attr = attrs + counter;
 
-		fd[i][counter] = sys_perf_counter_open(&attr, target_pid, cpu, group_fd, 0);
+		attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+		attr->freq = freq;
+
+		fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0);
 		if (fd[i][counter] < 0) {
 			int err = errno;
 
@@ -670,7 +649,6 @@ int cmd_top(int argc, const char **argv, const char *prefix)
 	page_size = sysconf(_SC_PAGE_SIZE);
 
 	create_events_help(events_help_msg);
-	memcpy(event_id, default_event_id, sizeof(default_event_id));
 
 	argc = parse_options(argc, argv, options, top_usage, 0);
 	if (argc)
@@ -688,19 +666,22 @@ int cmd_top(int argc, const char **argv, const char *prefix)
 		profile_cpu = -1;
 	}
 
-	if (!nr_counters) {
+	if (!nr_counters)
 		nr_counters = 1;
-		event_id[0] = 0;
-	}
 
 	if (delay_secs < 1)
 		delay_secs = 1;
 
+	parse_symbols();
+
+	/*
+	 * Fill in the ones not specifically initialized via -c:
+	 */
 	for (counter = 0; counter < nr_counters; counter++) {
-		if (event_count[counter])
+		if (attrs[counter].sample_period)
 			continue;
 
-		event_count[counter] = default_interval;
+		attrs[counter].sample_period = default_interval;
 	}
 
 	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
@@ -710,7 +691,5 @@ int cmd_top(int argc, const char **argv, const char *prefix)
 	if (target_pid != -1 || profile_cpu != -1)
 		nr_cpus = 1;
 
-	parse_symbols();
-
 	return __cmd_top();
 }
diff --git a/Documentation/perf_counter/perf.h b/Documentation/perf_counter/perf.h
index 10622a48b408..af0a5046d743 100644
--- a/Documentation/perf_counter/perf.h
+++ b/Documentation/perf_counter/perf.h
@@ -64,6 +64,4 @@ sys_perf_counter_open(struct perf_counter_attr *attr_uptr,
 #define MAX_COUNTERS			256
 #define MAX_NR_CPUS			256
 
-#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id))
-
 #endif
diff --git a/Documentation/perf_counter/util/parse-events.c b/Documentation/perf_counter/util/parse-events.c
index 2fdfd1d923f2..eb56bd996573 100644
--- a/Documentation/perf_counter/util/parse-events.c
+++ b/Documentation/perf_counter/util/parse-events.c
@@ -6,37 +6,39 @@
 #include "exec_cmd.h"
 #include "string.h"
 
 int nr_counters;
 
-__u64 event_id[MAX_COUNTERS] = { };
-int event_mask[MAX_COUNTERS];
+struct perf_counter_attr attrs[MAX_COUNTERS];
 
 struct event_symbol {
-	__u64 event;
-	char *symbol;
+	__u8 type;
+	__u64 config;
+	char *symbol;
 };
 
+#define C(x, y) .type = PERF_TYPE_##x, .config = PERF_COUNT_##y
+
 static struct event_symbol event_symbols[] = {
-	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cpu-cycles", },
-	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cycles", },
-	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), "instructions", },
-	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), "cache-references", },
-	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), "cache-misses", },
-	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branch-instructions", },
-	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branches", },
-	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES), "branch-misses", },
-	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES), "bus-cycles", },
+	{ C(HARDWARE, CPU_CYCLES), "cpu-cycles", },
+	{ C(HARDWARE, CPU_CYCLES), "cycles", },
+	{ C(HARDWARE, INSTRUCTIONS), "instructions", },
+	{ C(HARDWARE, CACHE_REFERENCES), "cache-references", },
+	{ C(HARDWARE, CACHE_MISSES), "cache-misses", },
+	{ C(HARDWARE, BRANCH_INSTRUCTIONS), "branch-instructions", },
+	{ C(HARDWARE, BRANCH_INSTRUCTIONS), "branches", },
+	{ C(HARDWARE, BRANCH_MISSES), "branch-misses", },
+	{ C(HARDWARE, BUS_CYCLES), "bus-cycles", },
 
-	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK), "cpu-clock", },
-	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), "task-clock", },
-	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "page-faults", },
-	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "faults", },
-	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN), "minor-faults", },
-	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ), "major-faults", },
-	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "context-switches", },
-	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "cs", },
-	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "cpu-migrations", },
-	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "migrations", },
+	{ C(SOFTWARE, CPU_CLOCK), "cpu-clock", },
+	{ C(SOFTWARE, TASK_CLOCK), "task-clock", },
+	{ C(SOFTWARE, PAGE_FAULTS), "page-faults", },
+	{ C(SOFTWARE, PAGE_FAULTS), "faults", },
+	{ C(SOFTWARE, PAGE_FAULTS_MIN), "minor-faults", },
+	{ C(SOFTWARE, PAGE_FAULTS_MAJ), "major-faults", },
+	{ C(SOFTWARE, CONTEXT_SWITCHES), "context-switches", },
+	{ C(SOFTWARE, CONTEXT_SWITCHES), "cs", },
+	{ C(SOFTWARE, CPU_MIGRATIONS), "cpu-migrations", },
+	{ C(SOFTWARE, CPU_MIGRATIONS), "migrations", },
 };
 
 #define __PERF_COUNTER_FIELD(config, name) \
@@ -67,27 +69,26 @@ static char *sw_event_names[] = {
 	"major faults",
 };
 
-char *event_name(int ctr)
+char *event_name(int counter)
 {
-	__u64 config = event_id[ctr];
-	int type = PERF_COUNTER_TYPE(config);
-	int id = PERF_COUNTER_ID(config);
+	__u64 config = attrs[counter].config;
+	int type = attrs[counter].type;
 	static char buf[32];
 
-	if (PERF_COUNTER_RAW(config)) {
-		sprintf(buf, "raw 0x%llx", PERF_COUNTER_CONFIG(config));
+	if (attrs[counter].type == PERF_TYPE_RAW) {
+		sprintf(buf, "raw 0x%llx", config);
 		return buf;
 	}
 
 	switch (type) {
 	case PERF_TYPE_HARDWARE:
-		if (id < PERF_HW_EVENTS_MAX)
-			return hw_event_names[id];
+		if (config < PERF_HW_EVENTS_MAX)
+			return hw_event_names[config];
 		return "unknown-hardware";
 
 	case PERF_TYPE_SOFTWARE:
-		if (id < PERF_SW_EVENTS_MAX)
-			return sw_event_names[id];
+		if (config < PERF_SW_EVENTS_MAX)
+			return sw_event_names[config];
 		return "unknown-software";
 
 	default:
@@ -101,15 +102,19 @@ char *event_name(int ctr)
  * Each event can have multiple symbolic names.
  * Symbolic names are (almost) exactly matched.
  */
-static __u64 match_event_symbols(const char *str)
+static int match_event_symbols(const char *str, struct perf_counter_attr *attr)
 {
 	__u64 config, id;
 	int type;
 	unsigned int i;
 	const char *sep, *pstr;
 
-	if (str[0] == 'r' && hex2u64(str + 1, &config) > 0)
-		return config | PERF_COUNTER_RAW_MASK;
+	if (str[0] == 'r' && hex2u64(str + 1, &config) > 0) {
+		attr->type = PERF_TYPE_RAW;
+		attr->config = config;
+
+		return 0;
+	}
 
 	pstr = str;
 	sep = strchr(pstr, ':');
@@ -121,35 +126,45 @@ static __u64 match_event_symbols(const char *str)
 		if (sep) {
 			pstr = sep + 1;
 			if (strchr(pstr, 'k'))
-				event_mask[nr_counters] |= EVENT_MASK_USER;
+				attr->exclude_user = 1;
 			if (strchr(pstr, 'u'))
-				event_mask[nr_counters] |= EVENT_MASK_KERNEL;
+				attr->exclude_kernel = 1;
 		}
-		return EID(type, id);
+		attr->type = type;
+		attr->config = id;
+
+		return 0;
 	}
 
 	for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
 		if (!strncmp(str, event_symbols[i].symbol,
-			     strlen(event_symbols[i].symbol)))
-			return event_symbols[i].event;
+			     strlen(event_symbols[i].symbol))) {
+
+			attr->type = event_symbols[i].type;
+			attr->config = event_symbols[i].config;
+
+			return 0;
+		}
 	}
 
-	return ~0ULL;
+	return -EINVAL;
 }
 
 int parse_events(const struct option *opt, const char *str, int unset)
 {
-	__u64 config;
+	struct perf_counter_attr attr;
+	int ret;
 
+	memset(&attr, 0, sizeof(attr));
 again:
 	if (nr_counters == MAX_COUNTERS)
 		return -1;
 
-	config = match_event_symbols(str);
-	if (config == ~0ULL)
-		return -1;
+	ret = match_event_symbols(str, &attr);
+	if (ret < 0)
+		return ret;
 
-	event_id[nr_counters] = config;
+	attrs[nr_counters] = attr;
 	nr_counters++;
 
 	str = strstr(str, ",");
@@ -168,7 +183,6 @@ void create_events_help(char *events_help_msg)
 {
 	unsigned int i;
 	char *str;
-	__u64 e;
 
 	str = events_help_msg;
 
@@ -178,9 +192,8 @@ void create_events_help(char *events_help_msg)
 	for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
 		int type, id;
 
-		e = event_symbols[i].event;
-		type = PERF_COUNTER_TYPE(e);
-		id = PERF_COUNTER_ID(e);
+		type = event_symbols[i].type;
+		id = event_symbols[i].config;
 
 		if (i)
 			str += sprintf(str, "|");
@@ -191,4 +204,3 @@ void create_events_help(char *events_help_msg)
 
 	str += sprintf(str, "|rNNN]");
 }
-
diff --git a/Documentation/perf_counter/util/parse-events.h b/Documentation/perf_counter/util/parse-events.h
index 0da306bb9028..542971c495bd 100644
--- a/Documentation/perf_counter/util/parse-events.h
+++ b/Documentation/perf_counter/util/parse-events.h
@@ -3,12 +3,9 @@
  * Parse symbolic events/counts passed in as options:
  */
 
 extern int nr_counters;
-extern __u64 event_id[MAX_COUNTERS];
-extern int event_mask[MAX_COUNTERS];
 
-#define EVENT_MASK_KERNEL 1
-#define EVENT_MASK_USER 2
+extern struct perf_counter_attr attrs[MAX_COUNTERS];
 
 extern char *event_name(int ctr);
 
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 232b00a36f79..4786ad9a2887 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -867,13 +867,13 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
 	if (!ppmu)
 		return ERR_PTR(-ENXIO);
-	if (!perf_event_raw(&counter->attr)) {
-		ev = perf_event_id(&counter->attr);
+	if (counter->attr.type != PERF_TYPE_RAW) {
+		ev = counter->attr.config;
 		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
 			return ERR_PTR(-EOPNOTSUPP);
 		ev = ppmu->generic_events[ev];
 	} else {
-		ev = perf_event_config(&counter->attr);
+		ev = counter->attr.config;
 	}
 	counter->hw.config_base = ev;
 	counter->hw.idx = 0;
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 8f53f3a7da29..430e048f2854 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -292,15 +292,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	/*
 	 * Raw event type provide the config in the event structure
 	 */
-	if (perf_event_raw(attr)) {
-		hwc->config |= x86_pmu.raw_event(perf_event_config(attr));
+	if (attr->type == PERF_TYPE_RAW) {
+		hwc->config |= x86_pmu.raw_event(attr->config);
 	} else {
-		if (perf_event_id(attr) >= x86_pmu.max_events)
+		if (attr->config >= x86_pmu.max_events)
 			return -EINVAL;
 		/*
 		 * The generic map:
 		 */
-		hwc->config |= x86_pmu.event_map(perf_event_id(attr));
+		hwc->config |= x86_pmu.event_map(attr->config);
 	}
 
 	counter->destroy = hw_perf_counter_destroy;
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 4f9d39ecdc05..f794c69b34c9 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -73,26 +73,6 @@ enum sw_event_ids {
 	PERF_SW_EVENTS_MAX		= 7,
 };
 
-#define __PERF_COUNTER_MASK(name)			\
-	(((1ULL << PERF_COUNTER_##name##_BITS) - 1) <<	\
-	 PERF_COUNTER_##name##_SHIFT)
-
-#define PERF_COUNTER_RAW_BITS		1
-#define PERF_COUNTER_RAW_SHIFT		63
-#define PERF_COUNTER_RAW_MASK		__PERF_COUNTER_MASK(RAW)
-
-#define PERF_COUNTER_CONFIG_BITS	63
-#define PERF_COUNTER_CONFIG_SHIFT	0
-#define PERF_COUNTER_CONFIG_MASK	__PERF_COUNTER_MASK(CONFIG)
-
-#define PERF_COUNTER_TYPE_BITS		7
-#define PERF_COUNTER_TYPE_SHIFT		56
-#define PERF_COUNTER_TYPE_MASK		__PERF_COUNTER_MASK(TYPE)
-
-#define PERF_COUNTER_EVENT_BITS		56
-#define PERF_COUNTER_EVENT_SHIFT	0
-#define PERF_COUNTER_EVENT_MASK		__PERF_COUNTER_MASK(EVENT)
-
 /*
  * Bits that can be set in attr.sample_type to request information
  * in the overflow packets.
@@ -125,10 +105,13 @@ enum perf_counter_read_format {
  */
 struct perf_counter_attr {
 	/*
-	 * The MSB of the config word signifies if the rest contains cpu
-	 * specific (raw) counter configuration data, if unset, the next
-	 * 7 bits are an event type and the rest of the bits are the event
-	 * identifier.
+	 * Major type: hardware/software/tracepoint/etc.
+	 */
+	__u32			type;
+	__u32			__reserved_1;
+
+	/*
+	 * Type specific configuration information.
 	 */
 	__u64			config;
 
@@ -152,12 +135,11 @@ struct perf_counter_attr {
 				comm	       :  1, /* include comm data     */
 				freq	       :  1, /* use freq, not period  */
 
-				__reserved_1   : 53;
+				__reserved_2   : 53;
 
 	__u32			wakeup_events;	/* wakeup every n events */
-	__u32			__reserved_2;
+	__u32			__reserved_3;
 
-	__u64			__reserved_3;
 	__u64			__reserved_4;
 };
 
@@ -278,8 +260,8 @@ enum perf_event_type {
 
 	/*
 	 * struct {
-	 * 	struct perf_event_header	header;
-	 * 	u32				pid, ppid;
+	 *	struct perf_event_header	header;
+	 *	u32				pid, ppid;
 	 * };
 	 */
 	PERF_EVENT_FORK			= 7,
@@ -331,27 +313,6 @@
 
 struct task_struct;
 
-static inline u64 perf_event_raw(struct perf_counter_attr *attr)
-{
-	return attr->config & PERF_COUNTER_RAW_MASK;
-}
-
-static inline u64 perf_event_config(struct perf_counter_attr *attr)
-{
-	return attr->config & PERF_COUNTER_CONFIG_MASK;
-}
-
-static inline u64 perf_event_type(struct perf_counter_attr *attr)
-{
-	return (attr->config & PERF_COUNTER_TYPE_MASK) >>
-		PERF_COUNTER_TYPE_SHIFT;
-}
-
-static inline u64 perf_event_id(struct perf_counter_attr *attr)
-{
-	return attr->config & PERF_COUNTER_EVENT_MASK;
-}
-
 /**
  * struct hw_perf_counter - performance counter hardware details:
  */
@@ -616,8 +577,8 @@ extern int perf_counter_overflow(struct perf_counter *counter,
  */
 static inline int is_software_counter(struct perf_counter *counter)
 {
-	return !perf_event_raw(&counter->attr) &&
-		perf_event_type(&counter->attr) != PERF_TYPE_HARDWARE;
+	return (counter->attr.type != PERF_TYPE_RAW) &&
+		(counter->attr.type != PERF_TYPE_HARDWARE);
 }
 
 extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 47c92fb927f2..75ae76796df1 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -3091,14 +3091,12 @@ static int perf_swcounter_match(struct perf_counter *counter,
 				enum perf_event_types type,
 				u32 event, struct pt_regs *regs)
 {
-	u64 event_config;
-
-	event_config = ((u64) type << PERF_COUNTER_TYPE_SHIFT) | event;
-
 	if (!perf_swcounter_is_counting(counter))
 		return 0;
 
-	if (counter->attr.config != event_config)
+	if (counter->attr.type != type)
+		return 0;
+	if (counter->attr.config != event)
 		return 0;
 
 	if (regs) {
@@ -3403,7 +3401,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 	 * to be kernel events, and page faults are never hypervisor
 	 * events.
 	 */
-	switch (perf_event_id(&counter->attr)) {
+	switch (counter->attr.config) {
 	case PERF_COUNT_CPU_CLOCK:
 		pmu = &perf_ops_cpu_clock;
 
@@ -3496,12 +3494,12 @@ perf_counter_alloc(struct perf_counter_attr *attr,
 	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
 		goto done;
 
-	if (perf_event_raw(attr)) {
+	if (attr->type == PERF_TYPE_RAW) {
 		pmu = hw_perf_counter_init(counter);
 		goto done;
 	}
 
-	switch (perf_event_type(attr)) {
+	switch (attr->type) {
 	case PERF_TYPE_HARDWARE:
 		pmu = hw_perf_counter_init(counter);
 		break;