path: root/tools/perf
author    Ingo Molnar <mingo@elte.hu>   2009-09-21 06:02:48 -0400
committer Ingo Molnar <mingo@elte.hu>   2009-09-21 08:28:04 -0400
commit    cdd6c482c9ff9c55475ee7392ec8f672eddb7be6 (patch)
tree      81f98a3ab46c589792057fe2392c1e10f8ad7893 /tools/perf
parent    dfc65094d0313cc48969fa60bcf33d693aeb05a7 (diff)
perf: Do the big rename: Performance Counters -> Performance Events
Bye-bye Performance Counters, welcome Performance Events!

In the past few months the perfcounters subsystem has grown out of its
initial role of counting hardware events, and has become (and is becoming)
a much broader generic event enumeration, reporting, logging, monitoring
and analysis facility.

Naming its core object 'perf_counter' and naming the subsystem
'perfcounters' has become more and more of a misnomer. With pending code
like hw-breakpoints support, the 'counter' name is less and less
appropriate.

All in all, we've decided to rename the subsystem to 'performance events'
and to propagate this rename through all fields, variables and API names
(in an ABI-compatible fashion). The word 'event' is also a bit shorter
than 'counter', which makes it slightly more convenient to write and
handle as well.

Thanks go to Stephane Eranian, who first observed this misnomer and
suggested a rename.

User-space tooling and ABI compatibility are not affected - this patch
should be function-invariant. (Also, defconfigs were not touched, to keep
the size down.)

This patch has been generated via the following script:

  FILES=$(find * -type f | grep -vE 'oprofile|[^K]config')

  sed -i \
    -e 's/PERF_EVENT_/PERF_RECORD_/g' \
    -e 's/PERF_COUNTER/PERF_EVENT/g' \
    -e 's/perf_counter/perf_event/g' \
    -e 's/nb_counters/nb_events/g' \
    -e 's/swcounter/swevent/g' \
    -e 's/tpcounter_event/tp_event/g' \
    $FILES

  for N in $(find . -name perf_counter.[ch]); do
    M=$(echo $N | sed 's/perf_counter/perf_event/g')
    mv $N $M
  done

  FILES=$(find . -name perf_event.*)

  sed -i \
    -e 's/COUNTER_MASK/REG_MASK/g' \
    -e 's/COUNTER/EVENT/g' \
    -e 's/\<event\>/event_id/g' \
    -e 's/counter/event/g' \
    -e 's/Counter/Event/g' \
    $FILES

... to keep it as correct as possible. This script can also be used by
anyone who has pending perfcounters patches - it converts a Linux kernel
tree over to the new naming. We tried to time this change for the point
where the number of pending patches is smallest: the end of the merge
window.

Namespace clashes were fixed up in a preparatory patch, and some stylistic
fallout will be fixed up in a subsequent patch.

(NOTE: 'counters' are still the proper terminology when we deal with
hardware registers - and these sed scripts are a bit over-eager in
renaming them. I've undone some of that, but in case there's something
left where 'counter' would be better than 'event', we can undo that on an
individual basis instead of touching an otherwise nicely automated patch.)

Suggested-by: Stephane Eranian <eranian@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Reviewed-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
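For reference, a minimal sketch of what the renamed user-space API looks
like after this patch: counting CPU cycles for the current task. The names
used (struct perf_event_attr, __NR_perf_event_open, PERF_EVENT_IOC_ENABLE /
PERF_EVENT_IOC_DISABLE, CONFIG_PERF_EVENTS) are the ones introduced by the
rename, as seen in the perf.h and design.txt hunks below; the program itself
is purely illustrative and not part of the patch.

/*
 * Illustrative sketch (not part of the patch): count CPU cycles for the
 * current task using the renamed API. Assumes a kernel built with
 * CONFIG_PERF_EVENTS=y and the post-rename linux/perf_event.h header.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size     = sizeof(attr);
	attr.type     = PERF_TYPE_HARDWARE;
	attr.config   = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;

	/* pid == 0, cpu == -1: this task, on any CPU; no group leader */
	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("sys_perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE);	/* was PERF_COUNTER_IOC_ENABLE */
	/* ... run the workload to be measured here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE);	/* was PERF_COUNTER_IOC_DISABLE */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %lld\n", count);

	close(fd);
	return 0;
}

Since the rename is ABI-compatible, a program like this behaves exactly as
its pre-rename equivalent did; only the names in the header and syscall
wrapper change.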
Diffstat (limited to 'tools/perf')
-rw-r--r--  tools/perf/Makefile                  2
-rw-r--r--  tools/perf/builtin-annotate.c       28
-rw-r--r--  tools/perf/builtin-record.c         22
-rw-r--r--  tools/perf/builtin-report.c         48
-rw-r--r--  tools/perf/builtin-sched.c          20
-rw-r--r--  tools/perf/builtin-stat.c           10
-rw-r--r--  tools/perf/builtin-timechart.c      14
-rw-r--r--  tools/perf/builtin-top.c            12
-rw-r--r--  tools/perf/builtin-trace.c          22
-rw-r--r--  tools/perf/design.txt               58
-rw-r--r--  tools/perf/perf.h                   12
-rw-r--r--  tools/perf/util/event.h              4
-rw-r--r--  tools/perf/util/header.c             6
-rw-r--r--  tools/perf/util/header.h             8
-rw-r--r--  tools/perf/util/parse-events.c      32
-rw-r--r--  tools/perf/util/parse-events.h       2
-rw-r--r--  tools/perf/util/trace-event-info.c   8
-rw-r--r--  tools/perf/util/trace-event.h        2

18 files changed, 155 insertions(+), 155 deletions(-)
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 0aba8b6e9c54..b5f1953b6144 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -318,7 +318,7 @@ export PERL_PATH
318 318
319LIB_FILE=libperf.a 319LIB_FILE=libperf.a
320 320
321LIB_H += ../../include/linux/perf_counter.h 321LIB_H += ../../include/linux/perf_event.h
322LIB_H += ../../include/linux/rbtree.h 322LIB_H += ../../include/linux/rbtree.h
323LIB_H += ../../include/linux/list.h 323LIB_H += ../../include/linux/list.h
324LIB_H += util/include/linux/list.h 324LIB_H += util/include/linux/list.h
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 043d85b7e254..1ec741615814 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -505,7 +505,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
505 return -1; 505 return -1;
506 } 506 }
507 507
508 if (event->header.misc & PERF_EVENT_MISC_KERNEL) { 508 if (event->header.misc & PERF_RECORD_MISC_KERNEL) {
509 show = SHOW_KERNEL; 509 show = SHOW_KERNEL;
510 level = 'k'; 510 level = 'k';
511 511
@@ -513,7 +513,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
513 513
514 dump_printf(" ...... dso: %s\n", dso->name); 514 dump_printf(" ...... dso: %s\n", dso->name);
515 515
516 } else if (event->header.misc & PERF_EVENT_MISC_USER) { 516 } else if (event->header.misc & PERF_RECORD_MISC_USER) {
517 517
518 show = SHOW_USER; 518 show = SHOW_USER;
519 level = '.'; 519 level = '.';
@@ -565,7 +565,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
565 565
566 thread = threads__findnew(event->mmap.pid, &threads, &last_match); 566 thread = threads__findnew(event->mmap.pid, &threads, &last_match);
567 567
568 dump_printf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n", 568 dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n",
569 (void *)(offset + head), 569 (void *)(offset + head),
570 (void *)(long)(event->header.size), 570 (void *)(long)(event->header.size),
571 event->mmap.pid, 571 event->mmap.pid,
@@ -575,7 +575,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
575 event->mmap.filename); 575 event->mmap.filename);
576 576
577 if (thread == NULL || map == NULL) { 577 if (thread == NULL || map == NULL) {
578 dump_printf("problem processing PERF_EVENT_MMAP, skipping event.\n"); 578 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
579 return 0; 579 return 0;
580 } 580 }
581 581
@@ -591,14 +591,14 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
591 struct thread *thread; 591 struct thread *thread;
592 592
593 thread = threads__findnew(event->comm.pid, &threads, &last_match); 593 thread = threads__findnew(event->comm.pid, &threads, &last_match);
594 dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", 594 dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
595 (void *)(offset + head), 595 (void *)(offset + head),
596 (void *)(long)(event->header.size), 596 (void *)(long)(event->header.size),
597 event->comm.comm, event->comm.pid); 597 event->comm.comm, event->comm.pid);
598 598
599 if (thread == NULL || 599 if (thread == NULL ||
600 thread__set_comm(thread, event->comm.comm)) { 600 thread__set_comm(thread, event->comm.comm)) {
601 dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n"); 601 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
602 return -1; 602 return -1;
603 } 603 }
604 total_comm++; 604 total_comm++;
@@ -614,7 +614,7 @@ process_fork_event(event_t *event, unsigned long offset, unsigned long head)
614 614
615 thread = threads__findnew(event->fork.pid, &threads, &last_match); 615 thread = threads__findnew(event->fork.pid, &threads, &last_match);
616 parent = threads__findnew(event->fork.ppid, &threads, &last_match); 616 parent = threads__findnew(event->fork.ppid, &threads, &last_match);
617 dump_printf("%p [%p]: PERF_EVENT_FORK: %d:%d\n", 617 dump_printf("%p [%p]: PERF_RECORD_FORK: %d:%d\n",
618 (void *)(offset + head), 618 (void *)(offset + head),
619 (void *)(long)(event->header.size), 619 (void *)(long)(event->header.size),
620 event->fork.pid, event->fork.ppid); 620 event->fork.pid, event->fork.ppid);
@@ -627,7 +627,7 @@ process_fork_event(event_t *event, unsigned long offset, unsigned long head)
627 return 0; 627 return 0;
628 628
629 if (!thread || !parent || thread__fork(thread, parent)) { 629 if (!thread || !parent || thread__fork(thread, parent)) {
630 dump_printf("problem processing PERF_EVENT_FORK, skipping event.\n"); 630 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
631 return -1; 631 return -1;
632 } 632 }
633 total_fork++; 633 total_fork++;
@@ -639,23 +639,23 @@ static int
639process_event(event_t *event, unsigned long offset, unsigned long head) 639process_event(event_t *event, unsigned long offset, unsigned long head)
640{ 640{
641 switch (event->header.type) { 641 switch (event->header.type) {
642 case PERF_EVENT_SAMPLE: 642 case PERF_RECORD_SAMPLE:
643 return process_sample_event(event, offset, head); 643 return process_sample_event(event, offset, head);
644 644
645 case PERF_EVENT_MMAP: 645 case PERF_RECORD_MMAP:
646 return process_mmap_event(event, offset, head); 646 return process_mmap_event(event, offset, head);
647 647
648 case PERF_EVENT_COMM: 648 case PERF_RECORD_COMM:
649 return process_comm_event(event, offset, head); 649 return process_comm_event(event, offset, head);
650 650
651 case PERF_EVENT_FORK: 651 case PERF_RECORD_FORK:
652 return process_fork_event(event, offset, head); 652 return process_fork_event(event, offset, head);
653 /* 653 /*
654 * We dont process them right now but they are fine: 654 * We dont process them right now but they are fine:
655 */ 655 */
656 656
657 case PERF_EVENT_THROTTLE: 657 case PERF_RECORD_THROTTLE:
658 case PERF_EVENT_UNTHROTTLE: 658 case PERF_RECORD_UNTHROTTLE:
659 return 0; 659 return 0;
660 660
661 default: 661 default:
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 2459e5a22ed8..a5a050af8e7d 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -77,7 +77,7 @@ static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
77 77
78static unsigned long mmap_read_head(struct mmap_data *md) 78static unsigned long mmap_read_head(struct mmap_data *md)
79{ 79{
80 struct perf_counter_mmap_page *pc = md->base; 80 struct perf_event_mmap_page *pc = md->base;
81 long head; 81 long head;
82 82
83 head = pc->data_head; 83 head = pc->data_head;
@@ -88,7 +88,7 @@ static unsigned long mmap_read_head(struct mmap_data *md)
88 88
89static void mmap_write_tail(struct mmap_data *md, unsigned long tail) 89static void mmap_write_tail(struct mmap_data *md, unsigned long tail)
90{ 90{
91 struct perf_counter_mmap_page *pc = md->base; 91 struct perf_event_mmap_page *pc = md->base;
92 92
93 /* 93 /*
94 * ensure all reads are done before we write the tail out. 94 * ensure all reads are done before we write the tail out.
@@ -233,7 +233,7 @@ static pid_t pid_synthesize_comm_event(pid_t pid, int full)
233 } 233 }
234 } 234 }
235 235
236 comm_ev.header.type = PERF_EVENT_COMM; 236 comm_ev.header.type = PERF_RECORD_COMM;
237 size = ALIGN(size, sizeof(u64)); 237 size = ALIGN(size, sizeof(u64));
238 comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size); 238 comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size);
239 239
@@ -288,7 +288,7 @@ static void pid_synthesize_mmap_samples(pid_t pid, pid_t tgid)
288 while (1) { 288 while (1) {
289 char bf[BUFSIZ], *pbf = bf; 289 char bf[BUFSIZ], *pbf = bf;
290 struct mmap_event mmap_ev = { 290 struct mmap_event mmap_ev = {
291 .header = { .type = PERF_EVENT_MMAP }, 291 .header = { .type = PERF_RECORD_MMAP },
292 }; 292 };
293 int n; 293 int n;
294 size_t size; 294 size_t size;
@@ -355,7 +355,7 @@ static void synthesize_all(void)
355 355
356static int group_fd; 356static int group_fd;
357 357
358static struct perf_header_attr *get_header_attr(struct perf_counter_attr *a, int nr) 358static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int nr)
359{ 359{
360 struct perf_header_attr *h_attr; 360 struct perf_header_attr *h_attr;
361 361
@@ -371,7 +371,7 @@ static struct perf_header_attr *get_header_attr(struct perf_counter_attr *a, int
371 371
372static void create_counter(int counter, int cpu, pid_t pid) 372static void create_counter(int counter, int cpu, pid_t pid)
373{ 373{
374 struct perf_counter_attr *attr = attrs + counter; 374 struct perf_event_attr *attr = attrs + counter;
375 struct perf_header_attr *h_attr; 375 struct perf_header_attr *h_attr;
376 int track = !counter; /* only the first counter needs these */ 376 int track = !counter; /* only the first counter needs these */
377 struct { 377 struct {
@@ -417,7 +417,7 @@ static void create_counter(int counter, int cpu, pid_t pid)
417 attr->disabled = 1; 417 attr->disabled = 1;
418 418
419try_again: 419try_again:
420 fd[nr_cpu][counter] = sys_perf_counter_open(attr, pid, cpu, group_fd, 0); 420 fd[nr_cpu][counter] = sys_perf_event_open(attr, pid, cpu, group_fd, 0);
421 421
422 if (fd[nr_cpu][counter] < 0) { 422 if (fd[nr_cpu][counter] < 0) {
423 int err = errno; 423 int err = errno;
@@ -444,7 +444,7 @@ try_again:
444 printf("\n"); 444 printf("\n");
445 error("perfcounter syscall returned with %d (%s)\n", 445 error("perfcounter syscall returned with %d (%s)\n",
446 fd[nr_cpu][counter], strerror(err)); 446 fd[nr_cpu][counter], strerror(err));
447 die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n"); 447 die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
448 exit(-1); 448 exit(-1);
449 } 449 }
450 450
@@ -478,7 +478,7 @@ try_again:
478 if (multiplex && fd[nr_cpu][counter] != multiplex_fd) { 478 if (multiplex && fd[nr_cpu][counter] != multiplex_fd) {
479 int ret; 479 int ret;
480 480
481 ret = ioctl(fd[nr_cpu][counter], PERF_COUNTER_IOC_SET_OUTPUT, multiplex_fd); 481 ret = ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd);
482 assert(ret != -1); 482 assert(ret != -1);
483 } else { 483 } else {
484 event_array[nr_poll].fd = fd[nr_cpu][counter]; 484 event_array[nr_poll].fd = fd[nr_cpu][counter];
@@ -496,7 +496,7 @@ try_again:
496 } 496 }
497 } 497 }
498 498
499 ioctl(fd[nr_cpu][counter], PERF_COUNTER_IOC_ENABLE); 499 ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_ENABLE);
500} 500}
501 501
502static void open_counters(int cpu, pid_t pid) 502static void open_counters(int cpu, pid_t pid)
@@ -642,7 +642,7 @@ static int __cmd_record(int argc, const char **argv)
642 if (done) { 642 if (done) {
643 for (i = 0; i < nr_cpu; i++) { 643 for (i = 0; i < nr_cpu; i++) {
644 for (counter = 0; counter < nr_counters; counter++) 644 for (counter = 0; counter < nr_counters; counter++)
645 ioctl(fd[i][counter], PERF_COUNTER_IOC_DISABLE); 645 ioctl(fd[i][counter], PERF_EVENT_IOC_DISABLE);
646 } 646 }
647 } 647 }
648 } 648 }
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index cdf9a8d27bb9..19669c20088e 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -1121,7 +1121,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1121 more_data += sizeof(u64); 1121 more_data += sizeof(u64);
1122 } 1122 }
1123 1123
1124 dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", 1124 dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
1125 (void *)(offset + head), 1125 (void *)(offset + head),
1126 (void *)(long)(event->header.size), 1126 (void *)(long)(event->header.size),
1127 event->header.misc, 1127 event->header.misc,
@@ -1158,9 +1158,9 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1158 if (comm_list && !strlist__has_entry(comm_list, thread->comm)) 1158 if (comm_list && !strlist__has_entry(comm_list, thread->comm))
1159 return 0; 1159 return 0;
1160 1160
1161 cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK; 1161 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1162 1162
1163 if (cpumode == PERF_EVENT_MISC_KERNEL) { 1163 if (cpumode == PERF_RECORD_MISC_KERNEL) {
1164 show = SHOW_KERNEL; 1164 show = SHOW_KERNEL;
1165 level = 'k'; 1165 level = 'k';
1166 1166
@@ -1168,7 +1168,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1168 1168
1169 dump_printf(" ...... dso: %s\n", dso->name); 1169 dump_printf(" ...... dso: %s\n", dso->name);
1170 1170
1171 } else if (cpumode == PERF_EVENT_MISC_USER) { 1171 } else if (cpumode == PERF_RECORD_MISC_USER) {
1172 1172
1173 show = SHOW_USER; 1173 show = SHOW_USER;
1174 level = '.'; 1174 level = '.';
@@ -1210,7 +1210,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
1210 1210
1211 thread = threads__findnew(event->mmap.pid, &threads, &last_match); 1211 thread = threads__findnew(event->mmap.pid, &threads, &last_match);
1212 1212
1213 dump_printf("%p [%p]: PERF_EVENT_MMAP %d/%d: [%p(%p) @ %p]: %s\n", 1213 dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n",
1214 (void *)(offset + head), 1214 (void *)(offset + head),
1215 (void *)(long)(event->header.size), 1215 (void *)(long)(event->header.size),
1216 event->mmap.pid, 1216 event->mmap.pid,
@@ -1221,7 +1221,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
1221 event->mmap.filename); 1221 event->mmap.filename);
1222 1222
1223 if (thread == NULL || map == NULL) { 1223 if (thread == NULL || map == NULL) {
1224 dump_printf("problem processing PERF_EVENT_MMAP, skipping event.\n"); 1224 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1225 return 0; 1225 return 0;
1226 } 1226 }
1227 1227
@@ -1238,14 +1238,14 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
1238 1238
1239 thread = threads__findnew(event->comm.pid, &threads, &last_match); 1239 thread = threads__findnew(event->comm.pid, &threads, &last_match);
1240 1240
1241 dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", 1241 dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
1242 (void *)(offset + head), 1242 (void *)(offset + head),
1243 (void *)(long)(event->header.size), 1243 (void *)(long)(event->header.size),
1244 event->comm.comm, event->comm.pid); 1244 event->comm.comm, event->comm.pid);
1245 1245
1246 if (thread == NULL || 1246 if (thread == NULL ||
1247 thread__set_comm_adjust(thread, event->comm.comm)) { 1247 thread__set_comm_adjust(thread, event->comm.comm)) {
1248 dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n"); 1248 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
1249 return -1; 1249 return -1;
1250 } 1250 }
1251 total_comm++; 1251 total_comm++;
@@ -1262,10 +1262,10 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head)
1262 thread = threads__findnew(event->fork.pid, &threads, &last_match); 1262 thread = threads__findnew(event->fork.pid, &threads, &last_match);
1263 parent = threads__findnew(event->fork.ppid, &threads, &last_match); 1263 parent = threads__findnew(event->fork.ppid, &threads, &last_match);
1264 1264
1265 dump_printf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n", 1265 dump_printf("%p [%p]: PERF_RECORD_%s: (%d:%d):(%d:%d)\n",
1266 (void *)(offset + head), 1266 (void *)(offset + head),
1267 (void *)(long)(event->header.size), 1267 (void *)(long)(event->header.size),
1268 event->header.type == PERF_EVENT_FORK ? "FORK" : "EXIT", 1268 event->header.type == PERF_RECORD_FORK ? "FORK" : "EXIT",
1269 event->fork.pid, event->fork.tid, 1269 event->fork.pid, event->fork.tid,
1270 event->fork.ppid, event->fork.ptid); 1270 event->fork.ppid, event->fork.ptid);
1271 1271
@@ -1276,11 +1276,11 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head)
1276 if (thread == parent) 1276 if (thread == parent)
1277 return 0; 1277 return 0;
1278 1278
1279 if (event->header.type == PERF_EVENT_EXIT) 1279 if (event->header.type == PERF_RECORD_EXIT)
1280 return 0; 1280 return 0;
1281 1281
1282 if (!thread || !parent || thread__fork(thread, parent)) { 1282 if (!thread || !parent || thread__fork(thread, parent)) {
1283 dump_printf("problem processing PERF_EVENT_FORK, skipping event.\n"); 1283 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
1284 return -1; 1284 return -1;
1285 } 1285 }
1286 total_fork++; 1286 total_fork++;
@@ -1291,7 +1291,7 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head)
1291static int 1291static int
1292process_lost_event(event_t *event, unsigned long offset, unsigned long head) 1292process_lost_event(event_t *event, unsigned long offset, unsigned long head)
1293{ 1293{
1294 dump_printf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n", 1294 dump_printf("%p [%p]: PERF_RECORD_LOST: id:%Ld: lost:%Ld\n",
1295 (void *)(offset + head), 1295 (void *)(offset + head),
1296 (void *)(long)(event->header.size), 1296 (void *)(long)(event->header.size),
1297 event->lost.id, 1297 event->lost.id,
@@ -1305,7 +1305,7 @@ process_lost_event(event_t *event, unsigned long offset, unsigned long head)
1305static int 1305static int
1306process_read_event(event_t *event, unsigned long offset, unsigned long head) 1306process_read_event(event_t *event, unsigned long offset, unsigned long head)
1307{ 1307{
1308 struct perf_counter_attr *attr; 1308 struct perf_event_attr *attr;
1309 1309
1310 attr = perf_header__find_attr(event->read.id, header); 1310 attr = perf_header__find_attr(event->read.id, header);
1311 1311
@@ -1319,7 +1319,7 @@ process_read_event(event_t *event, unsigned long offset, unsigned long head)
1319 event->read.value); 1319 event->read.value);
1320 } 1320 }
1321 1321
1322 dump_printf("%p [%p]: PERF_EVENT_READ: %d %d %s %Lu\n", 1322 dump_printf("%p [%p]: PERF_RECORD_READ: %d %d %s %Lu\n",
1323 (void *)(offset + head), 1323 (void *)(offset + head),
1324 (void *)(long)(event->header.size), 1324 (void *)(long)(event->header.size),
1325 event->read.pid, 1325 event->read.pid,
@@ -1337,31 +1337,31 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
1337 trace_event(event); 1337 trace_event(event);
1338 1338
1339 switch (event->header.type) { 1339 switch (event->header.type) {
1340 case PERF_EVENT_SAMPLE: 1340 case PERF_RECORD_SAMPLE:
1341 return process_sample_event(event, offset, head); 1341 return process_sample_event(event, offset, head);
1342 1342
1343 case PERF_EVENT_MMAP: 1343 case PERF_RECORD_MMAP:
1344 return process_mmap_event(event, offset, head); 1344 return process_mmap_event(event, offset, head);
1345 1345
1346 case PERF_EVENT_COMM: 1346 case PERF_RECORD_COMM:
1347 return process_comm_event(event, offset, head); 1347 return process_comm_event(event, offset, head);
1348 1348
1349 case PERF_EVENT_FORK: 1349 case PERF_RECORD_FORK:
1350 case PERF_EVENT_EXIT: 1350 case PERF_RECORD_EXIT:
1351 return process_task_event(event, offset, head); 1351 return process_task_event(event, offset, head);
1352 1352
1353 case PERF_EVENT_LOST: 1353 case PERF_RECORD_LOST:
1354 return process_lost_event(event, offset, head); 1354 return process_lost_event(event, offset, head);
1355 1355
1356 case PERF_EVENT_READ: 1356 case PERF_RECORD_READ:
1357 return process_read_event(event, offset, head); 1357 return process_read_event(event, offset, head);
1358 1358
1359 /* 1359 /*
1360 * We dont process them right now but they are fine: 1360 * We dont process them right now but they are fine:
1361 */ 1361 */
1362 1362
1363 case PERF_EVENT_THROTTLE: 1363 case PERF_RECORD_THROTTLE:
1364 case PERF_EVENT_UNTHROTTLE: 1364 case PERF_RECORD_UNTHROTTLE:
1365 return 0; 1365 return 0;
1366 1366
1367 default: 1367 default:
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 275d79c6627a..ea9c15c0cdfe 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1573,7 +1573,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1573 more_data += sizeof(u64); 1573 more_data += sizeof(u64);
1574 } 1574 }
1575 1575
1576 dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", 1576 dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
1577 (void *)(offset + head), 1577 (void *)(offset + head),
1578 (void *)(long)(event->header.size), 1578 (void *)(long)(event->header.size),
1579 event->header.misc, 1579 event->header.misc,
@@ -1589,9 +1589,9 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1589 return -1; 1589 return -1;
1590 } 1590 }
1591 1591
1592 cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK; 1592 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1593 1593
1594 if (cpumode == PERF_EVENT_MISC_KERNEL) { 1594 if (cpumode == PERF_RECORD_MISC_KERNEL) {
1595 show = SHOW_KERNEL; 1595 show = SHOW_KERNEL;
1596 level = 'k'; 1596 level = 'k';
1597 1597
@@ -1599,7 +1599,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1599 1599
1600 dump_printf(" ...... dso: %s\n", dso->name); 1600 dump_printf(" ...... dso: %s\n", dso->name);
1601 1601
1602 } else if (cpumode == PERF_EVENT_MISC_USER) { 1602 } else if (cpumode == PERF_RECORD_MISC_USER) {
1603 1603
1604 show = SHOW_USER; 1604 show = SHOW_USER;
1605 level = '.'; 1605 level = '.';
@@ -1626,23 +1626,23 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
1626 1626
1627 nr_events++; 1627 nr_events++;
1628 switch (event->header.type) { 1628 switch (event->header.type) {
1629 case PERF_EVENT_MMAP: 1629 case PERF_RECORD_MMAP:
1630 return 0; 1630 return 0;
1631 case PERF_EVENT_LOST: 1631 case PERF_RECORD_LOST:
1632 nr_lost_chunks++; 1632 nr_lost_chunks++;
1633 nr_lost_events += event->lost.lost; 1633 nr_lost_events += event->lost.lost;
1634 return 0; 1634 return 0;
1635 1635
1636 case PERF_EVENT_COMM: 1636 case PERF_RECORD_COMM:
1637 return process_comm_event(event, offset, head); 1637 return process_comm_event(event, offset, head);
1638 1638
1639 case PERF_EVENT_EXIT ... PERF_EVENT_READ: 1639 case PERF_RECORD_EXIT ... PERF_RECORD_READ:
1640 return 0; 1640 return 0;
1641 1641
1642 case PERF_EVENT_SAMPLE: 1642 case PERF_RECORD_SAMPLE:
1643 return process_sample_event(event, offset, head); 1643 return process_sample_event(event, offset, head);
1644 1644
1645 case PERF_EVENT_MAX: 1645 case PERF_RECORD_MAX:
1646 default: 1646 default:
1647 return -1; 1647 return -1;
1648 } 1648 }
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 61b828236c11..16af2d82e858 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -48,7 +48,7 @@
48#include <sys/prctl.h> 48#include <sys/prctl.h>
49#include <math.h> 49#include <math.h>
50 50
51static struct perf_counter_attr default_attrs[] = { 51static struct perf_event_attr default_attrs[] = {
52 52
53 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 53 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
54 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES}, 54 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES},
@@ -130,11 +130,11 @@ struct stats runtime_cycles_stats;
130 attrs[counter].config == PERF_COUNT_##c) 130 attrs[counter].config == PERF_COUNT_##c)
131 131
132#define ERR_PERF_OPEN \ 132#define ERR_PERF_OPEN \
133"Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n" 133"Error: counter %d, sys_perf_event_open() syscall returned with %d (%s)\n"
134 134
135static void create_perf_stat_counter(int counter, int pid) 135static void create_perf_stat_counter(int counter, int pid)
136{ 136{
137 struct perf_counter_attr *attr = attrs + counter; 137 struct perf_event_attr *attr = attrs + counter;
138 138
139 if (scale) 139 if (scale)
140 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | 140 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
@@ -144,7 +144,7 @@ static void create_perf_stat_counter(int counter, int pid)
144 unsigned int cpu; 144 unsigned int cpu;
145 145
146 for (cpu = 0; cpu < nr_cpus; cpu++) { 146 for (cpu = 0; cpu < nr_cpus; cpu++) {
147 fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0); 147 fd[cpu][counter] = sys_perf_event_open(attr, -1, cpu, -1, 0);
148 if (fd[cpu][counter] < 0 && verbose) 148 if (fd[cpu][counter] < 0 && verbose)
149 fprintf(stderr, ERR_PERF_OPEN, counter, 149 fprintf(stderr, ERR_PERF_OPEN, counter,
150 fd[cpu][counter], strerror(errno)); 150 fd[cpu][counter], strerror(errno));
@@ -154,7 +154,7 @@ static void create_perf_stat_counter(int counter, int pid)
154 attr->disabled = 1; 154 attr->disabled = 1;
155 attr->enable_on_exec = 1; 155 attr->enable_on_exec = 1;
156 156
157 fd[0][counter] = sys_perf_counter_open(attr, pid, -1, -1, 0); 157 fd[0][counter] = sys_perf_event_open(attr, pid, -1, -1, 0);
158 if (fd[0][counter] < 0 && verbose) 158 if (fd[0][counter] < 0 && verbose)
159 fprintf(stderr, ERR_PERF_OPEN, counter, 159 fprintf(stderr, ERR_PERF_OPEN, counter,
160 fd[0][counter], strerror(errno)); 160 fd[0][counter], strerror(errno));
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 600406396274..4405681b3134 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -937,21 +937,21 @@ process_event(event_t *event)
937 937
938 switch (event->header.type) { 938 switch (event->header.type) {
939 939
940 case PERF_EVENT_COMM: 940 case PERF_RECORD_COMM:
941 return process_comm_event(event); 941 return process_comm_event(event);
942 case PERF_EVENT_FORK: 942 case PERF_RECORD_FORK:
943 return process_fork_event(event); 943 return process_fork_event(event);
944 case PERF_EVENT_EXIT: 944 case PERF_RECORD_EXIT:
945 return process_exit_event(event); 945 return process_exit_event(event);
946 case PERF_EVENT_SAMPLE: 946 case PERF_RECORD_SAMPLE:
947 return queue_sample_event(event); 947 return queue_sample_event(event);
948 948
949 /* 949 /*
950 * We dont process them right now but they are fine: 950 * We dont process them right now but they are fine:
951 */ 951 */
952 case PERF_EVENT_MMAP: 952 case PERF_RECORD_MMAP:
953 case PERF_EVENT_THROTTLE: 953 case PERF_RECORD_THROTTLE:
954 case PERF_EVENT_UNTHROTTLE: 954 case PERF_RECORD_UNTHROTTLE:
955 return 0; 955 return 0;
956 956
957 default: 957 default:
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 4002ccb36750..1ca88896eee4 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -901,7 +901,7 @@ struct mmap_data {
901 901
902static unsigned int mmap_read_head(struct mmap_data *md) 902static unsigned int mmap_read_head(struct mmap_data *md)
903{ 903{
904 struct perf_counter_mmap_page *pc = md->base; 904 struct perf_event_mmap_page *pc = md->base;
905 int head; 905 int head;
906 906
907 head = pc->data_head; 907 head = pc->data_head;
@@ -977,9 +977,9 @@ static void mmap_read_counter(struct mmap_data *md)
977 977
978 old += size; 978 old += size;
979 979
980 if (event->header.type == PERF_EVENT_SAMPLE) { 980 if (event->header.type == PERF_RECORD_SAMPLE) {
981 int user = 981 int user =
982 (event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK) == PERF_EVENT_MISC_USER; 982 (event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK) == PERF_RECORD_MISC_USER;
983 process_event(event->ip.ip, md->counter, user); 983 process_event(event->ip.ip, md->counter, user);
984 } 984 }
985 } 985 }
@@ -1005,7 +1005,7 @@ int group_fd;
1005 1005
1006static void start_counter(int i, int counter) 1006static void start_counter(int i, int counter)
1007{ 1007{
1008 struct perf_counter_attr *attr; 1008 struct perf_event_attr *attr;
1009 int cpu; 1009 int cpu;
1010 1010
1011 cpu = profile_cpu; 1011 cpu = profile_cpu;
@@ -1019,7 +1019,7 @@ static void start_counter(int i, int counter)
1019 attr->inherit = (cpu < 0) && inherit; 1019 attr->inherit = (cpu < 0) && inherit;
1020 1020
1021try_again: 1021try_again:
1022 fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0); 1022 fd[i][counter] = sys_perf_event_open(attr, target_pid, cpu, group_fd, 0);
1023 1023
1024 if (fd[i][counter] < 0) { 1024 if (fd[i][counter] < 0) {
1025 int err = errno; 1025 int err = errno;
@@ -1044,7 +1044,7 @@ try_again:
1044 printf("\n"); 1044 printf("\n");
1045 error("perfcounter syscall returned with %d (%s)\n", 1045 error("perfcounter syscall returned with %d (%s)\n",
1046 fd[i][counter], strerror(err)); 1046 fd[i][counter], strerror(err));
1047 die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n"); 1047 die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
1048 exit(-1); 1048 exit(-1);
1049 } 1049 }
1050 assert(fd[i][counter] >= 0); 1050 assert(fd[i][counter] >= 0);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 914ab366e369..e9d256e2f47d 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -35,14 +35,14 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
35 35
36 thread = threads__findnew(event->comm.pid, &threads, &last_match); 36 thread = threads__findnew(event->comm.pid, &threads, &last_match);
37 37
38 dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", 38 dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
39 (void *)(offset + head), 39 (void *)(offset + head),
40 (void *)(long)(event->header.size), 40 (void *)(long)(event->header.size),
41 event->comm.comm, event->comm.pid); 41 event->comm.comm, event->comm.pid);
42 42
43 if (thread == NULL || 43 if (thread == NULL ||
44 thread__set_comm(thread, event->comm.comm)) { 44 thread__set_comm(thread, event->comm.comm)) {
45 dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n"); 45 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
46 return -1; 46 return -1;
47 } 47 }
48 total_comm++; 48 total_comm++;
@@ -82,7 +82,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
82 more_data += sizeof(u64); 82 more_data += sizeof(u64);
83 } 83 }
84 84
85 dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", 85 dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
86 (void *)(offset + head), 86 (void *)(offset + head),
87 (void *)(long)(event->header.size), 87 (void *)(long)(event->header.size),
88 event->header.misc, 88 event->header.misc,
@@ -98,9 +98,9 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
98 return -1; 98 return -1;
99 } 99 }
100 100
101 cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK; 101 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
102 102
103 if (cpumode == PERF_EVENT_MISC_KERNEL) { 103 if (cpumode == PERF_RECORD_MISC_KERNEL) {
104 show = SHOW_KERNEL; 104 show = SHOW_KERNEL;
105 level = 'k'; 105 level = 'k';
106 106
@@ -108,7 +108,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
108 108
109 dump_printf(" ...... dso: %s\n", dso->name); 109 dump_printf(" ...... dso: %s\n", dso->name);
110 110
111 } else if (cpumode == PERF_EVENT_MISC_USER) { 111 } else if (cpumode == PERF_RECORD_MISC_USER) {
112 112
113 show = SHOW_USER; 113 show = SHOW_USER;
114 level = '.'; 114 level = '.';
@@ -146,19 +146,19 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
146 trace_event(event); 146 trace_event(event);
147 147
148 switch (event->header.type) { 148 switch (event->header.type) {
149 case PERF_EVENT_MMAP ... PERF_EVENT_LOST: 149 case PERF_RECORD_MMAP ... PERF_RECORD_LOST:
150 return 0; 150 return 0;
151 151
152 case PERF_EVENT_COMM: 152 case PERF_RECORD_COMM:
153 return process_comm_event(event, offset, head); 153 return process_comm_event(event, offset, head);
154 154
155 case PERF_EVENT_EXIT ... PERF_EVENT_READ: 155 case PERF_RECORD_EXIT ... PERF_RECORD_READ:
156 return 0; 156 return 0;
157 157
158 case PERF_EVENT_SAMPLE: 158 case PERF_RECORD_SAMPLE:
159 return process_sample_event(event, offset, head); 159 return process_sample_event(event, offset, head);
160 160
161 case PERF_EVENT_MAX: 161 case PERF_RECORD_MAX:
162 default: 162 default:
163 return -1; 163 return -1;
164 } 164 }
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index f71e0d245cba..f1946d107b10 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -18,10 +18,10 @@ underlying hardware counters.
18Performance counters are accessed via special file descriptors. 18Performance counters are accessed via special file descriptors.
19There's one file descriptor per virtual counter used. 19There's one file descriptor per virtual counter used.
20 20
21The special file descriptor is opened via the perf_counter_open() 21The special file descriptor is opened via the perf_event_open()
22system call: 22system call:
23 23
24 int sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr, 24 int sys_perf_event_open(struct perf_event_hw_event *hw_event_uptr,
25 pid_t pid, int cpu, int group_fd, 25 pid_t pid, int cpu, int group_fd,
26 unsigned long flags); 26 unsigned long flags);
27 27
@@ -32,9 +32,9 @@ can be used to set the blocking mode, etc.
32Multiple counters can be kept open at a time, and the counters 32Multiple counters can be kept open at a time, and the counters
33can be poll()ed. 33can be poll()ed.
34 34
35When creating a new counter fd, 'perf_counter_hw_event' is: 35When creating a new counter fd, 'perf_event_hw_event' is:
36 36
37struct perf_counter_hw_event { 37struct perf_event_hw_event {
38 /* 38 /*
39 * The MSB of the config word signifies if the rest contains cpu 39 * The MSB of the config word signifies if the rest contains cpu
40 * specific (raw) counter configuration data, if unset, the next 40 * specific (raw) counter configuration data, if unset, the next
@@ -93,7 +93,7 @@ specified by 'event_id':
93 93
94/* 94/*
95 * Generalized performance counter event types, used by the hw_event.event_id 95 * Generalized performance counter event types, used by the hw_event.event_id
96 * parameter of the sys_perf_counter_open() syscall: 96 * parameter of the sys_perf_event_open() syscall:
97 */ 97 */
98enum hw_event_ids { 98enum hw_event_ids {
99 /* 99 /*
@@ -159,7 +159,7 @@ in size.
159 * reads on the counter should return the indicated quantities, 159 * reads on the counter should return the indicated quantities,
160 * in increasing order of bit value, after the counter value. 160 * in increasing order of bit value, after the counter value.
161 */ 161 */
162enum perf_counter_read_format { 162enum perf_event_read_format {
163 PERF_FORMAT_TOTAL_TIME_ENABLED = 1, 163 PERF_FORMAT_TOTAL_TIME_ENABLED = 1,
164 PERF_FORMAT_TOTAL_TIME_RUNNING = 2, 164 PERF_FORMAT_TOTAL_TIME_RUNNING = 2,
165}; 165};
@@ -178,7 +178,7 @@ interrupt:
178 * Bits that can be set in hw_event.record_type to request information 178 * Bits that can be set in hw_event.record_type to request information
179 * in the overflow packets. 179 * in the overflow packets.
180 */ 180 */
181enum perf_counter_record_format { 181enum perf_event_record_format {
182 PERF_RECORD_IP = 1U << 0, 182 PERF_RECORD_IP = 1U << 0,
183 PERF_RECORD_TID = 1U << 1, 183 PERF_RECORD_TID = 1U << 1,
184 PERF_RECORD_TIME = 1U << 2, 184 PERF_RECORD_TIME = 1U << 2,
@@ -228,7 +228,7 @@ these events are recorded in the ring-buffer (see below).
228The 'comm' bit allows tracking of process comm data on process creation. 228The 'comm' bit allows tracking of process comm data on process creation.
229This too is recorded in the ring-buffer (see below). 229This too is recorded in the ring-buffer (see below).
230 230
231The 'pid' parameter to the perf_counter_open() system call allows the 231The 'pid' parameter to the perf_event_open() system call allows the
232counter to be specific to a task: 232counter to be specific to a task:
233 233
234 pid == 0: if the pid parameter is zero, the counter is attached to the 234 pid == 0: if the pid parameter is zero, the counter is attached to the
@@ -258,7 +258,7 @@ The 'flags' parameter is currently unused and must be zero.
258 258
259The 'group_fd' parameter allows counter "groups" to be set up. A 259The 'group_fd' parameter allows counter "groups" to be set up. A
260counter group has one counter which is the group "leader". The leader 260counter group has one counter which is the group "leader". The leader
261is created first, with group_fd = -1 in the perf_counter_open call 261is created first, with group_fd = -1 in the perf_event_open call
262that creates it. The rest of the group members are created 262that creates it. The rest of the group members are created
263subsequently, with group_fd giving the fd of the group leader. 263subsequently, with group_fd giving the fd of the group leader.
264(A single counter on its own is created with group_fd = -1 and is 264(A single counter on its own is created with group_fd = -1 and is
@@ -277,13 +277,13 @@ tracking are logged into a ring-buffer. This ring-buffer is created and
277accessed through mmap(). 277accessed through mmap().
278 278
279The mmap size should be 1+2^n pages, where the first page is a meta-data page 279The mmap size should be 1+2^n pages, where the first page is a meta-data page
280(struct perf_counter_mmap_page) that contains various bits of information such 280(struct perf_event_mmap_page) that contains various bits of information such
281as where the ring-buffer head is. 281as where the ring-buffer head is.
282 282
283/* 283/*
284 * Structure of the page that can be mapped via mmap 284 * Structure of the page that can be mapped via mmap
285 */ 285 */
286struct perf_counter_mmap_page { 286struct perf_event_mmap_page {
287 __u32 version; /* version number of this structure */ 287 __u32 version; /* version number of this structure */
288 __u32 compat_version; /* lowest version this is compat with */ 288 __u32 compat_version; /* lowest version this is compat with */
289 289
@@ -317,7 +317,7 @@ struct perf_counter_mmap_page {
317 * Control data for the mmap() data buffer. 317 * Control data for the mmap() data buffer.
318 * 318 *
319 * User-space reading this value should issue an rmb(), on SMP capable 319 * User-space reading this value should issue an rmb(), on SMP capable
320 * platforms, after reading this value -- see perf_counter_wakeup(). 320 * platforms, after reading this value -- see perf_event_wakeup().
321 */ 321 */
322 __u32 data_head; /* head in the data section */ 322 __u32 data_head; /* head in the data section */
323}; 323};
@@ -327,9 +327,9 @@ NOTE: the hw-counter userspace bits are arch specific and are currently only
327 327
328The following 2^n pages are the ring-buffer which contains events of the form: 328The following 2^n pages are the ring-buffer which contains events of the form:
329 329
330#define PERF_EVENT_MISC_KERNEL (1 << 0) 330#define PERF_RECORD_MISC_KERNEL (1 << 0)
331#define PERF_EVENT_MISC_USER (1 << 1) 331#define PERF_RECORD_MISC_USER (1 << 1)
332#define PERF_EVENT_MISC_OVERFLOW (1 << 2) 332#define PERF_RECORD_MISC_OVERFLOW (1 << 2)
333 333
334struct perf_event_header { 334struct perf_event_header {
335 __u32 type; 335 __u32 type;
@@ -353,8 +353,8 @@ enum perf_event_type {
353 * char filename[]; 353 * char filename[];
354 * }; 354 * };
355 */ 355 */
356 PERF_EVENT_MMAP = 1, 356 PERF_RECORD_MMAP = 1,
357 PERF_EVENT_MUNMAP = 2, 357 PERF_RECORD_MUNMAP = 2,
358 358
359 /* 359 /*
360 * struct { 360 * struct {
@@ -364,10 +364,10 @@ enum perf_event_type {
364 * char comm[]; 364 * char comm[];
365 * }; 365 * };
366 */ 366 */
367 PERF_EVENT_COMM = 3, 367 PERF_RECORD_COMM = 3,
368 368
369 /* 369 /*
370 * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field 370 * When header.misc & PERF_RECORD_MISC_OVERFLOW the event_type field
371 * will be PERF_RECORD_* 371 * will be PERF_RECORD_*
372 * 372 *
373 * struct { 373 * struct {
@@ -397,7 +397,7 @@ Notification of new events is possible through poll()/select()/epoll() and
397fcntl() managing signals. 397fcntl() managing signals.
398 398
399Normally a notification is generated for every page filled, however one can 399Normally a notification is generated for every page filled, however one can
400additionally set perf_counter_hw_event.wakeup_events to generate one every 400additionally set perf_event_hw_event.wakeup_events to generate one every
401so many counter overflow events. 401so many counter overflow events.
402 402
403Future work will include a splice() interface to the ring-buffer. 403Future work will include a splice() interface to the ring-buffer.
@@ -409,11 +409,11 @@ events but does continue to exist and maintain its count value.
409 409
410An individual counter or counter group can be enabled with 410An individual counter or counter group can be enabled with
411 411
412 ioctl(fd, PERF_COUNTER_IOC_ENABLE); 412 ioctl(fd, PERF_EVENT_IOC_ENABLE);
413 413
414or disabled with 414or disabled with
415 415
416 ioctl(fd, PERF_COUNTER_IOC_DISABLE); 416 ioctl(fd, PERF_EVENT_IOC_DISABLE);
417 417
418Enabling or disabling the leader of a group enables or disables the 418Enabling or disabling the leader of a group enables or disables the
419whole group; that is, while the group leader is disabled, none of the 419whole group; that is, while the group leader is disabled, none of the
@@ -424,16 +424,16 @@ other counter.
424 424
425Additionally, non-inherited overflow counters can use 425Additionally, non-inherited overflow counters can use
426 426
427 ioctl(fd, PERF_COUNTER_IOC_REFRESH, nr); 427 ioctl(fd, PERF_EVENT_IOC_REFRESH, nr);
428 428
429to enable a counter for 'nr' events, after which it gets disabled again. 429to enable a counter for 'nr' events, after which it gets disabled again.
430 430
431A process can enable or disable all the counter groups that are 431A process can enable or disable all the counter groups that are
432attached to it, using prctl: 432attached to it, using prctl:
433 433
434 prctl(PR_TASK_PERF_COUNTERS_ENABLE); 434 prctl(PR_TASK_PERF_EVENTS_ENABLE);
435 435
436 prctl(PR_TASK_PERF_COUNTERS_DISABLE); 436 prctl(PR_TASK_PERF_EVENTS_DISABLE);
437 437
438This applies to all counters on the current process, whether created 438This applies to all counters on the current process, whether created
439by this process or by another, and doesn't affect any counters that 439by this process or by another, and doesn't affect any counters that
@@ -447,11 +447,11 @@ Arch requirements
447If your architecture does not have hardware performance metrics, you can 447If your architecture does not have hardware performance metrics, you can
448still use the generic software counters based on hrtimers for sampling. 448still use the generic software counters based on hrtimers for sampling.
449 449
450So to start with, in order to add HAVE_PERF_COUNTERS to your Kconfig, you 450So to start with, in order to add HAVE_PERF_EVENTS to your Kconfig, you
451will need at least this: 451will need at least this:
452 - asm/perf_counter.h - a basic stub will suffice at first 452 - asm/perf_event.h - a basic stub will suffice at first
453 - support for atomic64 types (and associated helper functions) 453 - support for atomic64 types (and associated helper functions)
454 - set_perf_counter_pending() implemented 454 - set_perf_event_pending() implemented
455 455
456If your architecture does have hardware capabilities, you can override the 456If your architecture does have hardware capabilities, you can override the
457weak stub hw_perf_counter_init() to register hardware counters. 457weak stub hw_perf_event_init() to register hardware counters.
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 2abeb20d0bf3..8cc4623afd6f 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -52,15 +52,15 @@
52#include <sys/types.h> 52#include <sys/types.h>
53#include <sys/syscall.h> 53#include <sys/syscall.h>
54 54
55#include "../../include/linux/perf_counter.h" 55#include "../../include/linux/perf_event.h"
56#include "util/types.h" 56#include "util/types.h"
57 57
58/* 58/*
59 * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all 59 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
60 * counters in the current task. 60 * counters in the current task.
61 */ 61 */
62#define PR_TASK_PERF_COUNTERS_DISABLE 31 62#define PR_TASK_PERF_EVENTS_DISABLE 31
63#define PR_TASK_PERF_COUNTERS_ENABLE 32 63#define PR_TASK_PERF_EVENTS_ENABLE 32
64 64
65#ifndef NSEC_PER_SEC 65#ifndef NSEC_PER_SEC
66# define NSEC_PER_SEC 1000000000ULL 66# define NSEC_PER_SEC 1000000000ULL
@@ -90,12 +90,12 @@ static inline unsigned long long rdclock(void)
90 _min1 < _min2 ? _min1 : _min2; }) 90 _min1 < _min2 ? _min1 : _min2; })
91 91
92static inline int 92static inline int
93sys_perf_counter_open(struct perf_counter_attr *attr, 93sys_perf_event_open(struct perf_event_attr *attr,
94 pid_t pid, int cpu, int group_fd, 94 pid_t pid, int cpu, int group_fd,
95 unsigned long flags) 95 unsigned long flags)
96{ 96{
97 attr->size = sizeof(*attr); 97 attr->size = sizeof(*attr);
98 return syscall(__NR_perf_counter_open, attr, pid, cpu, 98 return syscall(__NR_perf_event_open, attr, pid, cpu,
99 group_fd, flags); 99 group_fd, flags);
100} 100}
101 101
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 018d414a09d1..2c9c26d6ded0 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -1,5 +1,5 @@
1#ifndef __PERF_EVENT_H 1#ifndef __PERF_RECORD_H
2#define __PERF_EVENT_H 2#define __PERF_RECORD_H
3#include "../perf.h" 3#include "../perf.h"
4#include "util.h" 4#include "util.h"
5#include <linux/list.h> 5#include <linux/list.h>
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index bb4fca3efcc3..e306857b2c2b 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -9,7 +9,7 @@
9/* 9/*
10 * Create new perf.data header attribute: 10 * Create new perf.data header attribute:
11 */ 11 */
12struct perf_header_attr *perf_header_attr__new(struct perf_counter_attr *attr) 12struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr)
13{ 13{
14 struct perf_header_attr *self = malloc(sizeof(*self)); 14 struct perf_header_attr *self = malloc(sizeof(*self));
15 15
@@ -134,7 +134,7 @@ struct perf_file_section {
134}; 134};
135 135
136struct perf_file_attr { 136struct perf_file_attr {
137 struct perf_counter_attr attr; 137 struct perf_event_attr attr;
138 struct perf_file_section ids; 138 struct perf_file_section ids;
139}; 139};
140 140
@@ -320,7 +320,7 @@ u64 perf_header__sample_type(struct perf_header *header)
320 return type; 320 return type;
321} 321}
322 322
323struct perf_counter_attr * 323struct perf_event_attr *
324perf_header__find_attr(u64 id, struct perf_header *header) 324perf_header__find_attr(u64 id, struct perf_header *header)
325{ 325{
326 int i; 326 int i;
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 7b0e84a87179..a0761bc7863c 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -1,12 +1,12 @@
1#ifndef _PERF_HEADER_H 1#ifndef _PERF_HEADER_H
2#define _PERF_HEADER_H 2#define _PERF_HEADER_H
3 3
4#include "../../../include/linux/perf_counter.h" 4#include "../../../include/linux/perf_event.h"
5#include <sys/types.h> 5#include <sys/types.h>
6#include "types.h" 6#include "types.h"
7 7
8struct perf_header_attr { 8struct perf_header_attr {
9 struct perf_counter_attr attr; 9 struct perf_event_attr attr;
10 int ids, size; 10 int ids, size;
11 u64 *id; 11 u64 *id;
12 off_t id_offset; 12 off_t id_offset;
@@ -34,11 +34,11 @@ char *perf_header__find_event(u64 id);
34 34
35 35
36struct perf_header_attr * 36struct perf_header_attr *
37perf_header_attr__new(struct perf_counter_attr *attr); 37perf_header_attr__new(struct perf_event_attr *attr);
38void perf_header_attr__add_id(struct perf_header_attr *self, u64 id); 38void perf_header_attr__add_id(struct perf_header_attr *self, u64 id);
39 39
40u64 perf_header__sample_type(struct perf_header *header); 40u64 perf_header__sample_type(struct perf_header *header);
41struct perf_counter_attr * 41struct perf_event_attr *
42perf_header__find_attr(u64 id, struct perf_header *header); 42perf_header__find_attr(u64 id, struct perf_header *header);
43 43
44 44
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 89172fd0038b..13ab4b842d49 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -10,7 +10,7 @@
10 10
11int nr_counters; 11int nr_counters;
12 12
13struct perf_counter_attr attrs[MAX_COUNTERS]; 13struct perf_event_attr attrs[MAX_COUNTERS];
14 14
15struct event_symbol { 15struct event_symbol {
16 u8 type; 16 u8 type;
@@ -48,13 +48,13 @@ static struct event_symbol event_symbols[] = {
48 { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, 48 { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" },
49}; 49};
50 50
51#define __PERF_COUNTER_FIELD(config, name) \ 51#define __PERF_EVENT_FIELD(config, name) \
52 ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) 52 ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
53 53
54#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) 54#define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW)
55#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) 55#define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG)
56#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) 56#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
57#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) 57#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
58 58
59static const char *hw_event_names[] = { 59static const char *hw_event_names[] = {
60 "cycles", 60 "cycles",
@@ -352,7 +352,7 @@ static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int
352} 352}
353 353
354static enum event_result 354static enum event_result
355parse_generic_hw_event(const char **str, struct perf_counter_attr *attr) 355parse_generic_hw_event(const char **str, struct perf_event_attr *attr)
356{ 356{
357 const char *s = *str; 357 const char *s = *str;
358 int cache_type = -1, cache_op = -1, cache_result = -1; 358 int cache_type = -1, cache_op = -1, cache_result = -1;
@@ -417,7 +417,7 @@ parse_single_tracepoint_event(char *sys_name,
417 const char *evt_name, 417 const char *evt_name,
418 unsigned int evt_length, 418 unsigned int evt_length,
419 char *flags, 419 char *flags,
420 struct perf_counter_attr *attr, 420 struct perf_event_attr *attr,
421 const char **strp) 421 const char **strp)
422{ 422{
423 char evt_path[MAXPATHLEN]; 423 char evt_path[MAXPATHLEN];
@@ -505,7 +505,7 @@ parse_subsystem_tracepoint_event(char *sys_name, char *flags)
505 505
506 506
507static enum event_result parse_tracepoint_event(const char **strp, 507static enum event_result parse_tracepoint_event(const char **strp,
508 struct perf_counter_attr *attr) 508 struct perf_event_attr *attr)
509{ 509{
510 const char *evt_name; 510 const char *evt_name;
511 char *flags; 511 char *flags;
@@ -563,7 +563,7 @@ static int check_events(const char *str, unsigned int i)
563} 563}
564 564
565static enum event_result 565static enum event_result
566parse_symbolic_event(const char **strp, struct perf_counter_attr *attr) 566parse_symbolic_event(const char **strp, struct perf_event_attr *attr)
567{ 567{
568 const char *str = *strp; 568 const char *str = *strp;
569 unsigned int i; 569 unsigned int i;
@@ -582,7 +582,7 @@ parse_symbolic_event(const char **strp, struct perf_counter_attr *attr)
582} 582}
583 583
584static enum event_result 584static enum event_result
585parse_raw_event(const char **strp, struct perf_counter_attr *attr) 585parse_raw_event(const char **strp, struct perf_event_attr *attr)
586{ 586{
587 const char *str = *strp; 587 const char *str = *strp;
588 u64 config; 588 u64 config;
@@ -601,7 +601,7 @@ parse_raw_event(const char **strp, struct perf_counter_attr *attr)
601} 601}
602 602
603static enum event_result 603static enum event_result
604parse_numeric_event(const char **strp, struct perf_counter_attr *attr) 604parse_numeric_event(const char **strp, struct perf_event_attr *attr)
605{ 605{
606 const char *str = *strp; 606 const char *str = *strp;
607 char *endp; 607 char *endp;
@@ -623,7 +623,7 @@ parse_numeric_event(const char **strp, struct perf_counter_attr *attr)
623} 623}
624 624
625static enum event_result 625static enum event_result
626parse_event_modifier(const char **strp, struct perf_counter_attr *attr) 626parse_event_modifier(const char **strp, struct perf_event_attr *attr)
627{ 627{
628 const char *str = *strp; 628 const char *str = *strp;
629 int eu = 1, ek = 1, eh = 1; 629 int eu = 1, ek = 1, eh = 1;
@@ -656,7 +656,7 @@ parse_event_modifier(const char **strp, struct perf_counter_attr *attr)
656 * Symbolic names are (almost) exactly matched. 656 * Symbolic names are (almost) exactly matched.
657 */ 657 */
658static enum event_result 658static enum event_result
659parse_event_symbols(const char **str, struct perf_counter_attr *attr) 659parse_event_symbols(const char **str, struct perf_event_attr *attr)
660{ 660{
661 enum event_result ret; 661 enum event_result ret;
662 662
@@ -711,7 +711,7 @@ static void store_event_type(const char *orgname)
711 711
712int parse_events(const struct option *opt __used, const char *str, int unset __used) 712int parse_events(const struct option *opt __used, const char *str, int unset __used)
713{ 713{
714 struct perf_counter_attr attr; 714 struct perf_event_attr attr;
715 enum event_result ret; 715 enum event_result ret;
716 716
717 if (strchr(str, ':')) 717 if (strchr(str, ':'))
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 60704c15961f..30c608112845 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -16,7 +16,7 @@ extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
16 16
17extern int nr_counters; 17extern int nr_counters;
18 18
19extern struct perf_counter_attr attrs[MAX_COUNTERS]; 19extern struct perf_event_attr attrs[MAX_COUNTERS];
20 20
21extern const char *event_name(int ctr); 21extern const char *event_name(int ctr);
22extern const char *__event_name(int type, u64 config); 22extern const char *__event_name(int type, u64 config);
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index 1fd824c1f1c4..af4b0573b37f 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -480,12 +480,12 @@ out:
480} 480}
481 481
482static struct tracepoint_path * 482static struct tracepoint_path *
483get_tracepoints_path(struct perf_counter_attr *pattrs, int nb_counters) 483get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events)
484{ 484{
485 struct tracepoint_path path, *ppath = &path; 485 struct tracepoint_path path, *ppath = &path;
486 int i; 486 int i;
487 487
488 for (i = 0; i < nb_counters; i++) { 488 for (i = 0; i < nb_events; i++) {
489 if (pattrs[i].type != PERF_TYPE_TRACEPOINT) 489 if (pattrs[i].type != PERF_TYPE_TRACEPOINT)
490 continue; 490 continue;
491 ppath->next = tracepoint_id_to_path(pattrs[i].config); 491 ppath->next = tracepoint_id_to_path(pattrs[i].config);
@@ -496,7 +496,7 @@ get_tracepoints_path(struct perf_counter_attr *pattrs, int nb_counters)
496 496
497 return path.next; 497 return path.next;
498} 498}
499void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters) 499void read_tracing_data(struct perf_event_attr *pattrs, int nb_events)
500{ 500{
501 char buf[BUFSIZ]; 501 char buf[BUFSIZ];
502 struct tracepoint_path *tps; 502 struct tracepoint_path *tps;
@@ -530,7 +530,7 @@ void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters)
530 page_size = getpagesize(); 530 page_size = getpagesize();
531 write_or_die(&page_size, 4); 531 write_or_die(&page_size, 4);
532 532
533 tps = get_tracepoints_path(pattrs, nb_counters); 533 tps = get_tracepoints_path(pattrs, nb_events);
534 534
535 read_header_files(); 535 read_header_files();
536 read_ftrace_files(tps); 536 read_ftrace_files(tps);
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index d35ebf1e29ff..693f815c9429 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -240,6 +240,6 @@ unsigned long long
240raw_field_value(struct event *event, const char *name, void *data); 240raw_field_value(struct event *event, const char *name, void *data);
241void *raw_field_ptr(struct event *event, const char *name, void *data); 241void *raw_field_ptr(struct event *event, const char *name, void *data);
242 242
243void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters); 243void read_tracing_data(struct perf_event_attr *pattrs, int nb_events);
244 244
245#endif /* _TRACE_EVENTS_H */ 245#endif /* _TRACE_EVENTS_H */