Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/init_task.h    |  14 |
| -rw-r--r-- | include/linux/perf_counter.h | 497 |
| -rw-r--r-- | include/linux/perf_event.h   | 858 |
| -rw-r--r-- | include/linux/prctl.h        |   4 |
| -rw-r--r-- | include/linux/sched.h        |  12 |
| -rw-r--r-- | include/linux/syscalls.h     |   6 |
6 files changed, 916 insertions, 475 deletions
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 9e7f2e8fc66e..21a6f5d9af22 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
| @@ -106,13 +106,13 @@ extern struct group_info init_groups; | |||
| 106 | 106 | ||
| 107 | extern struct cred init_cred; | 107 | extern struct cred init_cred; |
| 108 | 108 | ||
| 109 | #ifdef CONFIG_PERF_COUNTERS | 109 | #ifdef CONFIG_PERF_EVENTS |
| 110 | # define INIT_PERF_COUNTERS(tsk) \ | 110 | # define INIT_PERF_EVENTS(tsk) \ |
| 111 | .perf_counter_mutex = \ | 111 | .perf_event_mutex = \ |
| 112 | __MUTEX_INITIALIZER(tsk.perf_counter_mutex), \ | 112 | __MUTEX_INITIALIZER(tsk.perf_event_mutex), \ |
| 113 | .perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list), | 113 | .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list), |
| 114 | #else | 114 | #else |
| 115 | # define INIT_PERF_COUNTERS(tsk) | 115 | # define INIT_PERF_EVENTS(tsk) |
| 116 | #endif | 116 | #endif |
| 117 | 117 | ||
| 118 | /* | 118 | /* |
| @@ -178,7 +178,7 @@ extern struct cred init_cred; | |||
| 178 | }, \ | 178 | }, \ |
| 179 | .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ | 179 | .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ |
| 180 | INIT_IDS \ | 180 | INIT_IDS \ |
| 181 | INIT_PERF_COUNTERS(tsk) \ | 181 | INIT_PERF_EVENTS(tsk) \ |
| 182 | INIT_TRACE_IRQFLAGS \ | 182 | INIT_TRACE_IRQFLAGS \ |
| 183 | INIT_LOCKDEP \ | 183 | INIT_LOCKDEP \ |
| 184 | INIT_FTRACE_GRAPH \ | 184 | INIT_FTRACE_GRAPH \ |
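The init_task.h hunk is a pure rename, but the pattern being renamed is worth spelling out: the initializer macro is defined twice so that the INIT_TASK() designated-initializer list stays valid whether or not the feature is configured in. A minimal sketch of that pattern with a hypothetical CONFIG_FOO standing in for the real perf_event symbols shown above:

#ifdef CONFIG_FOO
# define INIT_FOO(tsk)                                                  \
        .foo_mutex = __MUTEX_INITIALIZER(tsk.foo_mutex),                \
        .foo_list  = LIST_HEAD_INIT(tsk.foo_list),
#else
# define INIT_FOO(tsk)
#endif

#define INIT_TASK(tsk)                                                  \
{                                                                       \
        /* ... other fields ... */                                      \
        INIT_FOO(tsk)                                                   \
}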
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 740caad09a44..368bd70f1d2d 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h | |||
| @@ -1,5 +1,9 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Performance counters: | 2 | * NOTE: this file will be removed in a future kernel release, it is |
| 3 | * provided as a courtesy copy of user-space code that relies on the | ||
| 4 | * old (pre-rename) symbols and constants. | ||
| 5 | * | ||
| 6 | * Performance events: | ||
| 3 | * | 7 | * |
| 4 | * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> | 8 | * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> |
| 5 | * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar | 9 | * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar |
| @@ -131,19 +135,19 @@ enum perf_counter_sample_format { | |||
| 131 | * as specified by attr.read_format: | 135 | * as specified by attr.read_format: |
| 132 | * | 136 | * |
| 133 | * struct read_format { | 137 | * struct read_format { |
| 134 | * { u64 value; | 138 | * { u64 value; |
| 135 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED | 139 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED |
| 136 | * { u64 time_running; } && PERF_FORMAT_RUNNING | 140 | * { u64 time_running; } && PERF_FORMAT_RUNNING |
| 137 | * { u64 id; } && PERF_FORMAT_ID | 141 | * { u64 id; } && PERF_FORMAT_ID |
| 138 | * } && !PERF_FORMAT_GROUP | 142 | * } && !PERF_FORMAT_GROUP |
| 139 | * | 143 | * |
| 140 | * { u64 nr; | 144 | * { u64 nr; |
| 141 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED | 145 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED |
| 142 | * { u64 time_running; } && PERF_FORMAT_RUNNING | 146 | * { u64 time_running; } && PERF_FORMAT_RUNNING |
| 143 | * { u64 value; | 147 | * { u64 value; |
| 144 | * { u64 id; } && PERF_FORMAT_ID | 148 | * { u64 id; } && PERF_FORMAT_ID |
| 145 | * } cntr[nr]; | 149 | * } cntr[nr]; |
| 146 | * } && PERF_FORMAT_GROUP | 150 | * } && PERF_FORMAT_GROUP |
| 147 | * }; | 151 | * }; |
| 148 | */ | 152 | */ |
| 149 | enum perf_counter_read_format { | 153 | enum perf_counter_read_format { |
| @@ -314,9 +318,9 @@ enum perf_event_type { | |||
| 314 | 318 | ||
| 315 | /* | 319 | /* |
| 316 | * struct { | 320 | * struct { |
| 317 | * struct perf_event_header header; | 321 | * struct perf_event_header header; |
| 318 | * u64 id; | 322 | * u64 id; |
| 319 | * u64 lost; | 323 | * u64 lost; |
| 320 | * }; | 324 | * }; |
| 321 | */ | 325 | */ |
| 322 | PERF_EVENT_LOST = 2, | 326 | PERF_EVENT_LOST = 2, |
| @@ -364,10 +368,10 @@ enum perf_event_type { | |||
| 364 | 368 | ||
| 365 | /* | 369 | /* |
| 366 | * struct { | 370 | * struct { |
| 367 | * struct perf_event_header header; | 371 | * struct perf_event_header header; |
| 368 | * u32 pid, tid; | 372 | * u32 pid, tid; |
| 369 | * | 373 | * |
| 370 | * struct read_format values; | 374 | * struct read_format values; |
| 371 | * }; | 375 | * }; |
| 372 | */ | 376 | */ |
| 373 | PERF_EVENT_READ = 8, | 377 | PERF_EVENT_READ = 8, |
| @@ -383,23 +387,23 @@ enum perf_event_type { | |||
| 383 | * { u64 id; } && PERF_SAMPLE_ID | 387 | * { u64 id; } && PERF_SAMPLE_ID |
| 384 | * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID | 388 | * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID |
| 385 | * { u32 cpu, res; } && PERF_SAMPLE_CPU | 389 | * { u32 cpu, res; } && PERF_SAMPLE_CPU |
| 386 | * { u64 period; } && PERF_SAMPLE_PERIOD | 390 | * { u64 period; } && PERF_SAMPLE_PERIOD |
| 387 | * | 391 | * |
| 388 | * { struct read_format values; } && PERF_SAMPLE_READ | 392 | * { struct read_format values; } && PERF_SAMPLE_READ |
| 389 | * | 393 | * |
| 390 | * { u64 nr, | 394 | * { u64 nr, |
| 391 | * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN | 395 | * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN |
| 392 | * | 396 | * |
| 393 | * # | 397 | * # |
| 394 | * # The RAW record below is opaque data wrt the ABI | 398 | * # The RAW record below is opaque data wrt the ABI |
| 395 | * # | 399 | * # |
| 396 | * # That is, the ABI doesn't make any promises wrt to | 400 | * # That is, the ABI doesn't make any promises wrt to |
| 397 | * # the stability of its content, it may vary depending | 401 | * # the stability of its content, it may vary depending |
| 398 | * # on event, hardware, kernel version and phase of | 402 | * # on event, hardware, kernel version and phase of |
| 399 | * # the moon. | 403 | * # the moon. |
| 400 | * # | 404 | * # |
| 401 | * # In other words, PERF_SAMPLE_RAW contents are not an ABI. | 405 | * # In other words, PERF_SAMPLE_RAW contents are not an ABI. |
| 402 | * # | 406 | * # |
| 403 | * | 407 | * |
| 404 | * { u32 size; | 408 | * { u32 size; |
| 405 | * char data[size];}&& PERF_SAMPLE_RAW | 409 | * char data[size];}&& PERF_SAMPLE_RAW |
| @@ -422,437 +426,16 @@ enum perf_callchain_context { | |||
| 422 | PERF_CONTEXT_MAX = (__u64)-4095, | 426 | PERF_CONTEXT_MAX = (__u64)-4095, |
| 423 | }; | 427 | }; |
| 424 | 428 | ||
| 425 | #define PERF_FLAG_FD_NO_GROUP (1U << 0) | 429 | #define PERF_FLAG_FD_NO_GROUP (1U << 0) |
| 426 | #define PERF_FLAG_FD_OUTPUT (1U << 1) | 430 | #define PERF_FLAG_FD_OUTPUT (1U << 1) |
| 427 | 431 | ||
| 428 | #ifdef __KERNEL__ | ||
| 429 | /* | 432 | /* |
| 430 | * Kernel-internal data types and definitions: | 433 | * In case some app still references the old symbols: |
| 431 | */ | ||
| 432 | |||
| 433 | #ifdef CONFIG_PERF_COUNTERS | ||
| 434 | # include <asm/perf_counter.h> | ||
| 435 | #endif | ||
| 436 | |||
| 437 | #include <linux/list.h> | ||
| 438 | #include <linux/mutex.h> | ||
| 439 | #include <linux/rculist.h> | ||
| 440 | #include <linux/rcupdate.h> | ||
| 441 | #include <linux/spinlock.h> | ||
| 442 | #include <linux/hrtimer.h> | ||
| 443 | #include <linux/fs.h> | ||
| 444 | #include <linux/pid_namespace.h> | ||
| 445 | #include <asm/atomic.h> | ||
| 446 | |||
| 447 | #define PERF_MAX_STACK_DEPTH 255 | ||
| 448 | |||
| 449 | struct perf_callchain_entry { | ||
| 450 | __u64 nr; | ||
| 451 | __u64 ip[PERF_MAX_STACK_DEPTH]; | ||
| 452 | }; | ||
| 453 | |||
| 454 | struct perf_raw_record { | ||
| 455 | u32 size; | ||
| 456 | void *data; | ||
| 457 | }; | ||
| 458 | |||
| 459 | struct task_struct; | ||
| 460 | |||
| 461 | /** | ||
| 462 | * struct hw_perf_counter - performance counter hardware details: | ||
| 463 | */ | 434 | */ |
| 464 | struct hw_perf_counter { | ||
| 465 | #ifdef CONFIG_PERF_COUNTERS | ||
| 466 | union { | ||
| 467 | struct { /* hardware */ | ||
| 468 | u64 config; | ||
| 469 | unsigned long config_base; | ||
| 470 | unsigned long counter_base; | ||
| 471 | int idx; | ||
| 472 | }; | ||
| 473 | union { /* software */ | ||
| 474 | atomic64_t count; | ||
| 475 | struct hrtimer hrtimer; | ||
| 476 | }; | ||
| 477 | }; | ||
| 478 | atomic64_t prev_count; | ||
| 479 | u64 sample_period; | ||
| 480 | u64 last_period; | ||
| 481 | atomic64_t period_left; | ||
| 482 | u64 interrupts; | ||
| 483 | |||
| 484 | u64 freq_count; | ||
| 485 | u64 freq_interrupts; | ||
| 486 | u64 freq_stamp; | ||
| 487 | #endif | ||
| 488 | }; | ||
| 489 | |||
| 490 | struct perf_counter; | ||
| 491 | |||
| 492 | /** | ||
| 493 | * struct pmu - generic performance monitoring unit | ||
| 494 | */ | ||
| 495 | struct pmu { | ||
| 496 | int (*enable) (struct perf_counter *counter); | ||
| 497 | void (*disable) (struct perf_counter *counter); | ||
| 498 | void (*read) (struct perf_counter *counter); | ||
| 499 | void (*unthrottle) (struct perf_counter *counter); | ||
| 500 | }; | ||
| 501 | |||
| 502 | /** | ||
| 503 | * enum perf_counter_active_state - the states of a counter | ||
| 504 | */ | ||
| 505 | enum perf_counter_active_state { | ||
| 506 | PERF_COUNTER_STATE_ERROR = -2, | ||
| 507 | PERF_COUNTER_STATE_OFF = -1, | ||
| 508 | PERF_COUNTER_STATE_INACTIVE = 0, | ||
| 509 | PERF_COUNTER_STATE_ACTIVE = 1, | ||
| 510 | }; | ||
| 511 | |||
| 512 | struct file; | ||
| 513 | 435 | ||
| 514 | struct perf_mmap_data { | 436 | #define __NR_perf_counter_open __NR_perf_event_open |
| 515 | struct rcu_head rcu_head; | ||
| 516 | int nr_pages; /* nr of data pages */ | ||
| 517 | int writable; /* are we writable */ | ||
| 518 | int nr_locked; /* nr pages mlocked */ | ||
| 519 | 437 | ||
| 520 | atomic_t poll; /* POLL_ for wakeups */ | 438 | #define PR_TASK_PERF_COUNTERS_DISABLE PR_TASK_PERF_EVENTS_DISABLE |
| 521 | atomic_t events; /* event limit */ | 439 | #define PR_TASK_PERF_COUNTERS_ENABLE PR_TASK_PERF_EVENTS_ENABLE |
| 522 | 440 | ||
| 523 | atomic_long_t head; /* write position */ | ||
| 524 | atomic_long_t done_head; /* completed head */ | ||
| 525 | |||
| 526 | atomic_t lock; /* concurrent writes */ | ||
| 527 | atomic_t wakeup; /* needs a wakeup */ | ||
| 528 | atomic_t lost; /* nr records lost */ | ||
| 529 | |||
| 530 | long watermark; /* wakeup watermark */ | ||
| 531 | |||
| 532 | struct perf_counter_mmap_page *user_page; | ||
| 533 | void *data_pages[0]; | ||
| 534 | }; | ||
| 535 | |||
| 536 | struct perf_pending_entry { | ||
| 537 | struct perf_pending_entry *next; | ||
| 538 | void (*func)(struct perf_pending_entry *); | ||
| 539 | }; | ||
| 540 | |||
| 541 | /** | ||
| 542 | * struct perf_counter - performance counter kernel representation: | ||
| 543 | */ | ||
| 544 | struct perf_counter { | ||
| 545 | #ifdef CONFIG_PERF_COUNTERS | ||
| 546 | struct list_head list_entry; | ||
| 547 | struct list_head event_entry; | ||
| 548 | struct list_head sibling_list; | ||
| 549 | int nr_siblings; | ||
| 550 | struct perf_counter *group_leader; | ||
| 551 | struct perf_counter *output; | ||
| 552 | const struct pmu *pmu; | ||
| 553 | |||
| 554 | enum perf_counter_active_state state; | ||
| 555 | atomic64_t count; | ||
| 556 | |||
| 557 | /* | ||
| 558 | * These are the total time in nanoseconds that the counter | ||
| 559 | * has been enabled (i.e. eligible to run, and the task has | ||
| 560 | * been scheduled in, if this is a per-task counter) | ||
| 561 | * and running (scheduled onto the CPU), respectively. | ||
| 562 | * | ||
| 563 | * They are computed from tstamp_enabled, tstamp_running and | ||
| 564 | * tstamp_stopped when the counter is in INACTIVE or ACTIVE state. | ||
| 565 | */ | ||
| 566 | u64 total_time_enabled; | ||
| 567 | u64 total_time_running; | ||
| 568 | |||
| 569 | /* | ||
| 570 | * These are timestamps used for computing total_time_enabled | ||
| 571 | * and total_time_running when the counter is in INACTIVE or | ||
| 572 | * ACTIVE state, measured in nanoseconds from an arbitrary point | ||
| 573 | * in time. | ||
| 574 | * tstamp_enabled: the notional time when the counter was enabled | ||
| 575 | * tstamp_running: the notional time when the counter was scheduled on | ||
| 576 | * tstamp_stopped: in INACTIVE state, the notional time when the | ||
| 577 | * counter was scheduled off. | ||
| 578 | */ | ||
| 579 | u64 tstamp_enabled; | ||
| 580 | u64 tstamp_running; | ||
| 581 | u64 tstamp_stopped; | ||
| 582 | |||
| 583 | struct perf_counter_attr attr; | ||
| 584 | struct hw_perf_counter hw; | ||
| 585 | |||
| 586 | struct perf_counter_context *ctx; | ||
| 587 | struct file *filp; | ||
| 588 | |||
| 589 | /* | ||
| 590 | * These accumulate total time (in nanoseconds) that children | ||
| 591 | * counters have been enabled and running, respectively. | ||
| 592 | */ | ||
| 593 | atomic64_t child_total_time_enabled; | ||
| 594 | atomic64_t child_total_time_running; | ||
| 595 | |||
| 596 | /* | ||
| 597 | * Protect attach/detach and child_list: | ||
| 598 | */ | ||
| 599 | struct mutex child_mutex; | ||
| 600 | struct list_head child_list; | ||
| 601 | struct perf_counter *parent; | ||
| 602 | |||
| 603 | int oncpu; | ||
| 604 | int cpu; | ||
| 605 | |||
| 606 | struct list_head owner_entry; | ||
| 607 | struct task_struct *owner; | ||
| 608 | |||
| 609 | /* mmap bits */ | ||
| 610 | struct mutex mmap_mutex; | ||
| 611 | atomic_t mmap_count; | ||
| 612 | struct perf_mmap_data *data; | ||
| 613 | |||
| 614 | /* poll related */ | ||
| 615 | wait_queue_head_t waitq; | ||
| 616 | struct fasync_struct *fasync; | ||
| 617 | |||
| 618 | /* delayed work for NMIs and such */ | ||
| 619 | int pending_wakeup; | ||
| 620 | int pending_kill; | ||
| 621 | int pending_disable; | ||
| 622 | struct perf_pending_entry pending; | ||
| 623 | |||
| 624 | atomic_t event_limit; | ||
| 625 | |||
| 626 | void (*destroy)(struct perf_counter *); | ||
| 627 | struct rcu_head rcu_head; | ||
| 628 | |||
| 629 | struct pid_namespace *ns; | ||
| 630 | u64 id; | ||
| 631 | #endif | ||
| 632 | }; | ||
| 633 | |||
| 634 | /** | ||
| 635 | * struct perf_counter_context - counter context structure | ||
| 636 | * | ||
| 637 | * Used as a container for task counters and CPU counters as well: | ||
| 638 | */ | ||
| 639 | struct perf_counter_context { | ||
| 640 | /* | ||
| 641 | * Protect the states of the counters in the list, | ||
| 642 | * nr_active, and the list: | ||
| 643 | */ | ||
| 644 | spinlock_t lock; | ||
| 645 | /* | ||
| 646 | * Protect the list of counters. Locking either mutex or lock | ||
| 647 | * is sufficient to ensure the list doesn't change; to change | ||
| 648 | * the list you need to lock both the mutex and the spinlock. | ||
| 649 | */ | ||
| 650 | struct mutex mutex; | ||
| 651 | |||
| 652 | struct list_head counter_list; | ||
| 653 | struct list_head event_list; | ||
| 654 | int nr_counters; | ||
| 655 | int nr_active; | ||
| 656 | int is_active; | ||
| 657 | int nr_stat; | ||
| 658 | atomic_t refcount; | ||
| 659 | struct task_struct *task; | ||
| 660 | |||
| 661 | /* | ||
| 662 | * Context clock, runs when context enabled. | ||
| 663 | */ | ||
| 664 | u64 time; | ||
| 665 | u64 timestamp; | ||
| 666 | |||
| 667 | /* | ||
| 668 | * These fields let us detect when two contexts have both | ||
| 669 | * been cloned (inherited) from a common ancestor. | ||
| 670 | */ | ||
| 671 | struct perf_counter_context *parent_ctx; | ||
| 672 | u64 parent_gen; | ||
| 673 | u64 generation; | ||
| 674 | int pin_count; | ||
| 675 | struct rcu_head rcu_head; | ||
| 676 | }; | ||
| 677 | |||
| 678 | /** | ||
| 679 | * struct perf_counter_cpu_context - per cpu counter context structure | ||
| 680 | */ | ||
| 681 | struct perf_cpu_context { | ||
| 682 | struct perf_counter_context ctx; | ||
| 683 | struct perf_counter_context *task_ctx; | ||
| 684 | int active_oncpu; | ||
| 685 | int max_pertask; | ||
| 686 | int exclusive; | ||
| 687 | |||
| 688 | /* | ||
| 689 | * Recursion avoidance: | ||
| 690 | * | ||
| 691 | * task, softirq, irq, nmi context | ||
| 692 | */ | ||
| 693 | int recursion[4]; | ||
| 694 | }; | ||
| 695 | |||
| 696 | struct perf_output_handle { | ||
| 697 | struct perf_counter *counter; | ||
| 698 | struct perf_mmap_data *data; | ||
| 699 | unsigned long head; | ||
| 700 | unsigned long offset; | ||
| 701 | int nmi; | ||
| 702 | int sample; | ||
| 703 | int locked; | ||
| 704 | unsigned long flags; | ||
| 705 | }; | ||
| 706 | |||
| 707 | #ifdef CONFIG_PERF_COUNTERS | ||
| 708 | |||
| 709 | /* | ||
| 710 | * Set by architecture code: | ||
| 711 | */ | ||
| 712 | extern int perf_max_counters; | ||
| 713 | |||
| 714 | extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter); | ||
| 715 | |||
| 716 | extern void perf_counter_task_sched_in(struct task_struct *task, int cpu); | ||
| 717 | extern void perf_counter_task_sched_out(struct task_struct *task, | ||
| 718 | struct task_struct *next, int cpu); | ||
| 719 | extern void perf_counter_task_tick(struct task_struct *task, int cpu); | ||
| 720 | extern int perf_counter_init_task(struct task_struct *child); | ||
| 721 | extern void perf_counter_exit_task(struct task_struct *child); | ||
| 722 | extern void perf_counter_free_task(struct task_struct *task); | ||
| 723 | extern void set_perf_counter_pending(void); | ||
| 724 | extern void perf_counter_do_pending(void); | ||
| 725 | extern void perf_counter_print_debug(void); | ||
| 726 | extern void __perf_disable(void); | ||
| 727 | extern bool __perf_enable(void); | ||
| 728 | extern void perf_disable(void); | ||
| 729 | extern void perf_enable(void); | ||
| 730 | extern int perf_counter_task_disable(void); | ||
| 731 | extern int perf_counter_task_enable(void); | ||
| 732 | extern int hw_perf_group_sched_in(struct perf_counter *group_leader, | ||
| 733 | struct perf_cpu_context *cpuctx, | ||
| 734 | struct perf_counter_context *ctx, int cpu); | ||
| 735 | extern void perf_counter_update_userpage(struct perf_counter *counter); | ||
| 736 | |||
| 737 | struct perf_sample_data { | ||
| 738 | u64 type; | ||
| 739 | |||
| 740 | u64 ip; | ||
| 741 | struct { | ||
| 742 | u32 pid; | ||
| 743 | u32 tid; | ||
| 744 | } tid_entry; | ||
| 745 | u64 time; | ||
| 746 | u64 addr; | ||
| 747 | u64 id; | ||
| 748 | u64 stream_id; | ||
| 749 | struct { | ||
| 750 | u32 cpu; | ||
| 751 | u32 reserved; | ||
| 752 | } cpu_entry; | ||
| 753 | u64 period; | ||
| 754 | struct perf_callchain_entry *callchain; | ||
| 755 | struct perf_raw_record *raw; | ||
| 756 | }; | ||
| 757 | |||
| 758 | extern void perf_output_sample(struct perf_output_handle *handle, | ||
| 759 | struct perf_event_header *header, | ||
| 760 | struct perf_sample_data *data, | ||
| 761 | struct perf_counter *counter); | ||
| 762 | extern void perf_prepare_sample(struct perf_event_header *header, | ||
| 763 | struct perf_sample_data *data, | ||
| 764 | struct perf_counter *counter, | ||
| 765 | struct pt_regs *regs); | ||
| 766 | |||
| 767 | extern int perf_counter_overflow(struct perf_counter *counter, int nmi, | ||
| 768 | struct perf_sample_data *data, | ||
| 769 | struct pt_regs *regs); | ||
| 770 | |||
| 771 | /* | ||
| 772 | * Return 1 for a software counter, 0 for a hardware counter | ||
| 773 | */ | ||
| 774 | static inline int is_software_counter(struct perf_counter *counter) | ||
| 775 | { | ||
| 776 | return (counter->attr.type != PERF_TYPE_RAW) && | ||
| 777 | (counter->attr.type != PERF_TYPE_HARDWARE) && | ||
| 778 | (counter->attr.type != PERF_TYPE_HW_CACHE); | ||
| 779 | } | ||
| 780 | |||
| 781 | extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; | ||
| 782 | |||
| 783 | extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64); | ||
| 784 | |||
| 785 | static inline void | ||
| 786 | perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr) | ||
| 787 | { | ||
| 788 | if (atomic_read(&perf_swcounter_enabled[event])) | ||
| 789 | __perf_swcounter_event(event, nr, nmi, regs, addr); | ||
| 790 | } | ||
| 791 | |||
| 792 | extern void __perf_counter_mmap(struct vm_area_struct *vma); | ||
| 793 | |||
| 794 | static inline void perf_counter_mmap(struct vm_area_struct *vma) | ||
| 795 | { | ||
| 796 | if (vma->vm_flags & VM_EXEC) | ||
| 797 | __perf_counter_mmap(vma); | ||
| 798 | } | ||
| 799 | |||
| 800 | extern void perf_counter_comm(struct task_struct *tsk); | ||
| 801 | extern void perf_counter_fork(struct task_struct *tsk); | ||
| 802 | |||
| 803 | extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); | ||
| 804 | |||
| 805 | extern int sysctl_perf_counter_paranoid; | ||
| 806 | extern int sysctl_perf_counter_mlock; | ||
| 807 | extern int sysctl_perf_counter_sample_rate; | ||
| 808 | |||
| 809 | extern void perf_counter_init(void); | ||
| 810 | extern void perf_tpcounter_event(int event_id, u64 addr, u64 count, | ||
| 811 | void *record, int entry_size); | ||
| 812 | |||
| 813 | #ifndef perf_misc_flags | ||
| 814 | #define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \ | ||
| 815 | PERF_EVENT_MISC_KERNEL) | ||
| 816 | #define perf_instruction_pointer(regs) instruction_pointer(regs) | ||
| 817 | #endif | ||
| 818 | |||
| 819 | extern int perf_output_begin(struct perf_output_handle *handle, | ||
| 820 | struct perf_counter *counter, unsigned int size, | ||
| 821 | int nmi, int sample); | ||
| 822 | extern void perf_output_end(struct perf_output_handle *handle); | ||
| 823 | extern void perf_output_copy(struct perf_output_handle *handle, | ||
| 824 | const void *buf, unsigned int len); | ||
| 825 | #else | ||
| 826 | static inline void | ||
| 827 | perf_counter_task_sched_in(struct task_struct *task, int cpu) { } | ||
| 828 | static inline void | ||
| 829 | perf_counter_task_sched_out(struct task_struct *task, | ||
| 830 | struct task_struct *next, int cpu) { } | ||
| 831 | static inline void | ||
| 832 | perf_counter_task_tick(struct task_struct *task, int cpu) { } | ||
| 833 | static inline int perf_counter_init_task(struct task_struct *child) { return 0; } | ||
| 834 | static inline void perf_counter_exit_task(struct task_struct *child) { } | ||
| 835 | static inline void perf_counter_free_task(struct task_struct *task) { } | ||
| 836 | static inline void perf_counter_do_pending(void) { } | ||
| 837 | static inline void perf_counter_print_debug(void) { } | ||
| 838 | static inline void perf_disable(void) { } | ||
| 839 | static inline void perf_enable(void) { } | ||
| 840 | static inline int perf_counter_task_disable(void) { return -EINVAL; } | ||
| 841 | static inline int perf_counter_task_enable(void) { return -EINVAL; } | ||
| 842 | |||
| 843 | static inline void | ||
| 844 | perf_swcounter_event(u32 event, u64 nr, int nmi, | ||
| 845 | struct pt_regs *regs, u64 addr) { } | ||
| 846 | |||
| 847 | static inline void perf_counter_mmap(struct vm_area_struct *vma) { } | ||
| 848 | static inline void perf_counter_comm(struct task_struct *tsk) { } | ||
| 849 | static inline void perf_counter_fork(struct task_struct *tsk) { } | ||
| 850 | static inline void perf_counter_init(void) { } | ||
| 851 | |||
| 852 | #endif | ||
| 853 | |||
| 854 | #define perf_output_put(handle, x) \ | ||
| 855 | perf_output_copy((handle), &(x), sizeof(x)) | ||
| 856 | |||
| 857 | #endif /* __KERNEL__ */ | ||
| 858 | #endif /* _LINUX_PERF_COUNTER_H */ | 441 | #endif /* _LINUX_PERF_COUNTER_H */ |
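After this patch perf_counter.h is reduced to the old user-space ABI plus the compatibility defines above, so an unconverted tool keeps building with no source changes. A rough sketch of such a legacy call site (illustrative only, not code from the tree):

#include <linux/prctl.h>
#include <linux/perf_counter.h>         /* deprecated courtesy copy */

/* The old spellings still resolve: the attr struct and enums are still
 * defined here, and the defines above alias the renamed syscall number
 * and prctl options. */
struct perf_counter_attr attr = {
        .type   = PERF_TYPE_HARDWARE,
        .config = PERF_COUNT_HW_CPU_CYCLES,
        .size   = sizeof(struct perf_counter_attr),
};

Nothing changes at run time either, since __NR_perf_counter_open now expands to the same syscall slot as __NR_perf_event_open.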
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h new file mode 100644 index 000000000000..acefaf71e6dd --- /dev/null +++ b/include/linux/perf_event.h | |||
| @@ -0,0 +1,858 @@ | |||
| 1 | /* | ||
| 2 | * Performance events: | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> | ||
| 5 | * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar | ||
| 6 | * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra | ||
| 7 | * | ||
| 8 | * Data type definitions, declarations, prototypes. | ||
| 9 | * | ||
| 10 | * Started by: Thomas Gleixner and Ingo Molnar | ||
| 11 | * | ||
| 12 | * For licencing details see kernel-base/COPYING | ||
| 13 | */ | ||
| 14 | #ifndef _LINUX_PERF_EVENT_H | ||
| 15 | #define _LINUX_PERF_EVENT_H | ||
| 16 | |||
| 17 | #include <linux/types.h> | ||
| 18 | #include <linux/ioctl.h> | ||
| 19 | #include <asm/byteorder.h> | ||
| 20 | |||
| 21 | /* | ||
| 22 | * User-space ABI bits: | ||
| 23 | */ | ||
| 24 | |||
| 25 | /* | ||
| 26 | * attr.type | ||
| 27 | */ | ||
| 28 | enum perf_type_id { | ||
| 29 | PERF_TYPE_HARDWARE = 0, | ||
| 30 | PERF_TYPE_SOFTWARE = 1, | ||
| 31 | PERF_TYPE_TRACEPOINT = 2, | ||
| 32 | PERF_TYPE_HW_CACHE = 3, | ||
| 33 | PERF_TYPE_RAW = 4, | ||
| 34 | |||
| 35 | PERF_TYPE_MAX, /* non-ABI */ | ||
| 36 | }; | ||
| 37 | |||
| 38 | /* | ||
| 39 | * Generalized performance event event_id types, used by the | ||
| 40 | * attr.event_id parameter of the sys_perf_event_open() | ||
| 41 | * syscall: | ||
| 42 | */ | ||
| 43 | enum perf_hw_id { | ||
| 44 | /* | ||
| 45 | * Common hardware events, generalized by the kernel: | ||
| 46 | */ | ||
| 47 | PERF_COUNT_HW_CPU_CYCLES = 0, | ||
| 48 | PERF_COUNT_HW_INSTRUCTIONS = 1, | ||
| 49 | PERF_COUNT_HW_CACHE_REFERENCES = 2, | ||
| 50 | PERF_COUNT_HW_CACHE_MISSES = 3, | ||
| 51 | PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, | ||
| 52 | PERF_COUNT_HW_BRANCH_MISSES = 5, | ||
| 53 | PERF_COUNT_HW_BUS_CYCLES = 6, | ||
| 54 | |||
| 55 | PERF_COUNT_HW_MAX, /* non-ABI */ | ||
| 56 | }; | ||
| 57 | |||
| 58 | /* | ||
| 59 | * Generalized hardware cache events: | ||
| 60 | * | ||
| 61 | * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x | ||
| 62 | * { read, write, prefetch } x | ||
| 63 | * { accesses, misses } | ||
| 64 | */ | ||
| 65 | enum perf_hw_cache_id { | ||
| 66 | PERF_COUNT_HW_CACHE_L1D = 0, | ||
| 67 | PERF_COUNT_HW_CACHE_L1I = 1, | ||
| 68 | PERF_COUNT_HW_CACHE_LL = 2, | ||
| 69 | PERF_COUNT_HW_CACHE_DTLB = 3, | ||
| 70 | PERF_COUNT_HW_CACHE_ITLB = 4, | ||
| 71 | PERF_COUNT_HW_CACHE_BPU = 5, | ||
| 72 | |||
| 73 | PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ | ||
| 74 | }; | ||
| 75 | |||
| 76 | enum perf_hw_cache_op_id { | ||
| 77 | PERF_COUNT_HW_CACHE_OP_READ = 0, | ||
| 78 | PERF_COUNT_HW_CACHE_OP_WRITE = 1, | ||
| 79 | PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, | ||
| 80 | |||
| 81 | PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ | ||
| 82 | }; | ||
| 83 | |||
| 84 | enum perf_hw_cache_op_result_id { | ||
| 85 | PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, | ||
| 86 | PERF_COUNT_HW_CACHE_RESULT_MISS = 1, | ||
| 87 | |||
| 88 | PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ | ||
| 89 | }; | ||
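The three cache enums are combined into a single attr.config value for PERF_TYPE_HW_CACHE events. The comment above only lists the dimensions, so the packing below (cache id in the low byte, op in the next byte, result in the third) is given as the kernel's convention rather than something spelled out in this hunk, and the helper name is made up:

#include <linux/perf_event.h>

/* e.g. L1 data-cache read misses */
static void want_l1d_read_misses(struct perf_event_attr *attr)
{
        attr->type   = PERF_TYPE_HW_CACHE;
        attr->config = PERF_COUNT_HW_CACHE_L1D |
                       (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
                       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
}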
| 90 | |||
| 91 | /* | ||
| 92 | * Special "software" events provided by the kernel, even if the hardware | ||
| 93 | * does not support performance events. These events measure various | ||
| 94 | * physical and sw events of the kernel (and allow the profiling of them as | ||
| 95 | * well): | ||
| 96 | */ | ||
| 97 | enum perf_sw_ids { | ||
| 98 | PERF_COUNT_SW_CPU_CLOCK = 0, | ||
| 99 | PERF_COUNT_SW_TASK_CLOCK = 1, | ||
| 100 | PERF_COUNT_SW_PAGE_FAULTS = 2, | ||
| 101 | PERF_COUNT_SW_CONTEXT_SWITCHES = 3, | ||
| 102 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, | ||
| 103 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, | ||
| 104 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, | ||
| 105 | |||
| 106 | PERF_COUNT_SW_MAX, /* non-ABI */ | ||
| 107 | }; | ||
| 108 | |||
| 109 | /* | ||
| 110 | * Bits that can be set in attr.sample_type to request information | ||
| 111 | * in the overflow packets. | ||
| 112 | */ | ||
| 113 | enum perf_event_sample_format { | ||
| 114 | PERF_SAMPLE_IP = 1U << 0, | ||
| 115 | PERF_SAMPLE_TID = 1U << 1, | ||
| 116 | PERF_SAMPLE_TIME = 1U << 2, | ||
| 117 | PERF_SAMPLE_ADDR = 1U << 3, | ||
| 118 | PERF_SAMPLE_READ = 1U << 4, | ||
| 119 | PERF_SAMPLE_CALLCHAIN = 1U << 5, | ||
| 120 | PERF_SAMPLE_ID = 1U << 6, | ||
| 121 | PERF_SAMPLE_CPU = 1U << 7, | ||
| 122 | PERF_SAMPLE_PERIOD = 1U << 8, | ||
| 123 | PERF_SAMPLE_STREAM_ID = 1U << 9, | ||
| 124 | PERF_SAMPLE_RAW = 1U << 10, | ||
| 125 | |||
| 126 | PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */ | ||
| 127 | }; | ||
| 128 | |||
| 129 | /* | ||
| 130 | * The format of the data returned by read() on a perf event fd, | ||
| 131 | * as specified by attr.read_format: | ||
| 132 | * | ||
| 133 | * struct read_format { | ||
| 134 | * { u64 value; | ||
| 135 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED | ||
| 136 | * { u64 time_running; } && PERF_FORMAT_RUNNING | ||
| 137 | * { u64 id; } && PERF_FORMAT_ID | ||
| 138 | * } && !PERF_FORMAT_GROUP | ||
| 139 | * | ||
| 140 | * { u64 nr; | ||
| 141 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED | ||
| 142 | * { u64 time_running; } && PERF_FORMAT_RUNNING | ||
| 143 | * { u64 value; | ||
| 144 | * { u64 id; } && PERF_FORMAT_ID | ||
| 145 | * } cntr[nr]; | ||
| 146 | * } && PERF_FORMAT_GROUP | ||
| 147 | * }; | ||
| 148 | */ | ||
| 149 | enum perf_event_read_format { | ||
| 150 | PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, | ||
| 151 | PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, | ||
| 152 | PERF_FORMAT_ID = 1U << 2, | ||
| 153 | PERF_FORMAT_GROUP = 1U << 3, | ||
| 154 | |||
| 155 | PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ | ||
| 156 | }; | ||
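For the common non-group case, the pseudo-struct in the comment above flattens into a fixed layout, so a single read() fills it directly. A small sketch, assuming the event was opened with the two TOTAL_TIME flags and PERF_FORMAT_ID but without PERF_FORMAT_GROUP (struct and helper names are illustrative):

#include <stdint.h>
#include <unistd.h>

struct single_read {
        uint64_t value;                 /* the count itself */
        uint64_t time_enabled;          /* PERF_FORMAT_TOTAL_TIME_ENABLED */
        uint64_t time_running;          /* PERF_FORMAT_TOTAL_TIME_RUNNING */
        uint64_t id;                    /* PERF_FORMAT_ID */
};

static int read_event(int fd, struct single_read *r)
{
        return read(fd, r, sizeof(*r)) == sizeof(*r) ? 0 : -1;
}

When the PMU was over-committed, time_running lags time_enabled, and scaling value by time_enabled/time_running gives the usual estimate of the full-period count.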
| 157 | |||
| 158 | #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ | ||
| 159 | |||
| 160 | /* | ||
| 161 | * Hardware event_id to monitor via a performance monitoring event: | ||
| 162 | */ | ||
| 163 | struct perf_event_attr { | ||
| 164 | |||
| 165 | /* | ||
| 166 | * Major type: hardware/software/tracepoint/etc. | ||
| 167 | */ | ||
| 168 | __u32 type; | ||
| 169 | |||
| 170 | /* | ||
| 171 | * Size of the attr structure, for fwd/bwd compat. | ||
| 172 | */ | ||
| 173 | __u32 size; | ||
| 174 | |||
| 175 | /* | ||
| 176 | * Type specific configuration information. | ||
| 177 | */ | ||
| 178 | __u64 config; | ||
| 179 | |||
| 180 | union { | ||
| 181 | __u64 sample_period; | ||
| 182 | __u64 sample_freq; | ||
| 183 | }; | ||
| 184 | |||
| 185 | __u64 sample_type; | ||
| 186 | __u64 read_format; | ||
| 187 | |||
| 188 | __u64 disabled : 1, /* off by default */ | ||
| 189 | inherit : 1, /* children inherit it */ | ||
| 190 | pinned : 1, /* must always be on PMU */ | ||
| 191 | exclusive : 1, /* only group on PMU */ | ||
| 192 | exclude_user : 1, /* don't count user */ | ||
| 193 | exclude_kernel : 1, /* ditto kernel */ | ||
| 194 | exclude_hv : 1, /* ditto hypervisor */ | ||
| 195 | exclude_idle : 1, /* don't count when idle */ | ||
| 196 | mmap : 1, /* include mmap data */ | ||
| 197 | comm : 1, /* include comm data */ | ||
| 198 | freq : 1, /* use freq, not period */ | ||
| 199 | inherit_stat : 1, /* per task counts */ | ||
| 200 | enable_on_exec : 1, /* next exec enables */ | ||
| 201 | task : 1, /* trace fork/exit */ | ||
| 202 | watermark : 1, /* wakeup_watermark */ | ||
| 203 | |||
| 204 | __reserved_1 : 49; | ||
| 205 | |||
| 206 | union { | ||
| 207 | __u32 wakeup_events; /* wakeup every n events */ | ||
| 208 | __u32 wakeup_watermark; /* bytes before wakeup */ | ||
| 209 | }; | ||
| 210 | __u32 __reserved_2; | ||
| 211 | |||
| 212 | __u64 __reserved_3; | ||
| 213 | }; | ||
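A typical user-space way to fill the structure is to zero it first, so the reserved fields and unused flag bits stay 0, and let .size announce which revision of the ABI the caller was built against. A hedged sketch; the helper name is invented:

#include <string.h>
#include <linux/perf_event.h>

static void init_cycles_attr(struct perf_event_attr *attr)
{
        memset(attr, 0, sizeof(*attr));
        attr->type           = PERF_TYPE_HARDWARE;
        attr->size           = sizeof(*attr);   /* == PERF_ATTR_SIZE_VER0 here */
        attr->config         = PERF_COUNT_HW_CPU_CYCLES;
        attr->disabled       = 1;               /* enable later via ioctl */
        attr->exclude_kernel = 1;
        attr->exclude_hv     = 1;
}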
| 214 | |||
| 215 | /* | ||
| 216 | * Ioctls that can be done on a perf event fd: | ||
| 217 | */ | ||
| 218 | #define PERF_EVENT_IOC_ENABLE _IO ('$', 0) | ||
| 219 | #define PERF_EVENT_IOC_DISABLE _IO ('$', 1) | ||
| 220 | #define PERF_EVENT_IOC_REFRESH _IO ('$', 2) | ||
| 221 | #define PERF_EVENT_IOC_RESET _IO ('$', 3) | ||
| 222 | #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64) | ||
| 223 | #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) | ||
| 224 | |||
| 225 | enum perf_event_ioc_flags { | ||
| 226 | PERF_IOC_FLAG_GROUP = 1U << 0, | ||
| 227 | }; | ||
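These ioctls are what a self-profiling program typically brackets its measured region with, after opening the event with attr.disabled = 1. A sketch, where fd is assumed to come from the perf_event_open() wrapper sketched at the end of this patch; passing PERF_IOC_FLAG_GROUP as the ioctl argument applies the operation to the whole group rather than a single event:

#include <sys/ioctl.h>
#include <linux/perf_event.h>

static void measure(int fd, void (*workload)(void))
{
        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        workload();
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
}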
| 228 | |||
| 229 | /* | ||
| 230 | * Structure of the page that can be mapped via mmap | ||
| 231 | */ | ||
| 232 | struct perf_event_mmap_page { | ||
| 233 | __u32 version; /* version number of this structure */ | ||
| 234 | __u32 compat_version; /* lowest version this is compat with */ | ||
| 235 | |||
| 236 | /* | ||
| 237 | * Bits needed to read the hw events in user-space. | ||
| 238 | * | ||
| 239 | * u32 seq; | ||
| 240 | * s64 count; | ||
| 241 | * | ||
| 242 | * do { | ||
| 243 | * seq = pc->lock; | ||
| 244 | * | ||
| 245 | * barrier() | ||
| 246 | * if (pc->index) { | ||
| 247 | * count = pmc_read(pc->index - 1); | ||
| 248 | * count += pc->offset; | ||
| 249 | * } else | ||
| 250 | * goto regular_read; | ||
| 251 | * | ||
| 252 | * barrier(); | ||
| 253 | * } while (pc->lock != seq); | ||
| 254 | * | ||
| 255 | * NOTE: for obvious reason this only works on self-monitoring | ||
| 256 | * processes. | ||
| 257 | */ | ||
| 258 | __u32 lock; /* seqlock for synchronization */ | ||
| 259 | __u32 index; /* hardware event identifier */ | ||
| 260 | __s64 offset; /* add to hardware event value */ | ||
| 261 | __u64 time_enabled; /* time event active */ | ||
| 262 | __u64 time_running; /* time event on cpu */ | ||
| 263 | |||
| 264 | /* | ||
| 265 | * Hole for extension of the self monitor capabilities | ||
| 266 | */ | ||
| 267 | |||
| 268 | __u64 __reserved[123]; /* align to 1k */ | ||
| 269 | |||
| 270 | /* | ||
| 271 | * Control data for the mmap() data buffer. | ||
| 272 | * | ||
| 273 | * User-space reading the @data_head value should issue an rmb(), on | ||
| 274 | * SMP capable platforms, after reading this value -- see | ||
| 275 | * perf_event_wakeup(). | ||
| 276 | * | ||
| 277 | * When the mapping is PROT_WRITE the @data_tail value should be | ||
| 278 | * written by userspace to reflect the last read data. In this case | ||
| 279 | * the kernel will not over-write unread data. | ||
| 280 | */ | ||
| 281 | __u64 data_head; /* head in the data section */ | ||
| 282 | __u64 data_tail; /* user-space written tail */ | ||
| 283 | }; | ||
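The read loop documented inside the struct is nearly compilable as-is; below is a user-space rendering of it. barrier() is written out as a plain compiler barrier and pmc_read() stands in for the architecture-specific unprivileged counter read (RDPMC on x86); both are assumptions of this sketch, not definitions from the header:

#include <stdint.h>
#include <linux/perf_event.h>

#define barrier()       __asm__ __volatile__("" ::: "memory")

extern uint64_t pmc_read(unsigned int idx);     /* arch-specific placeholder */

/* Returns -1 when the event is not currently on a hardware counter;
 * the caller should then fall back to read() on the event fd. */
static int64_t self_read(volatile struct perf_event_mmap_page *pc)
{
        uint32_t seq;
        int64_t count;

        do {
                seq = pc->lock;
                barrier();
                if (pc->index)
                        count = pmc_read(pc->index - 1) + pc->offset;
                else
                        count = -1;
                barrier();
        } while (pc->lock != seq);

        return count;
}

As the header notes, this only works for self-monitoring; other processes have to go through read() on the fd.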
| 284 | |||
| 285 | #define PERF_RECORD_MISC_CPUMODE_MASK (3 << 0) | ||
| 286 | #define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0) | ||
| 287 | #define PERF_RECORD_MISC_KERNEL (1 << 0) | ||
| 288 | #define PERF_RECORD_MISC_USER (2 << 0) | ||
| 289 | #define PERF_RECORD_MISC_HYPERVISOR (3 << 0) | ||
| 290 | |||
| 291 | struct perf_event_header { | ||
| 292 | __u32 type; | ||
| 293 | __u16 misc; | ||
| 294 | __u16 size; | ||
| 295 | }; | ||
| 296 | |||
| 297 | enum perf_event_type { | ||
| 298 | |||
| 299 | /* | ||
| 300 | * The MMAP events record the PROT_EXEC mappings so that we can | ||
| 301 | * correlate userspace IPs to code. They have the following structure: | ||
| 302 | * | ||
| 303 | * struct { | ||
| 304 | * struct perf_event_header header; | ||
| 305 | * | ||
| 306 | * u32 pid, tid; | ||
| 307 | * u64 addr; | ||
| 308 | * u64 len; | ||
| 309 | * u64 pgoff; | ||
| 310 | * char filename[]; | ||
| 311 | * }; | ||
| 312 | */ | ||
| 313 | PERF_RECORD_MMAP = 1, | ||
| 314 | |||
| 315 | /* | ||
| 316 | * struct { | ||
| 317 | * struct perf_event_header header; | ||
| 318 | * u64 id; | ||
| 319 | * u64 lost; | ||
| 320 | * }; | ||
| 321 | */ | ||
| 322 | PERF_RECORD_LOST = 2, | ||
| 323 | |||
| 324 | /* | ||
| 325 | * struct { | ||
| 326 | * struct perf_event_header header; | ||
| 327 | * | ||
| 328 | * u32 pid, tid; | ||
| 329 | * char comm[]; | ||
| 330 | * }; | ||
| 331 | */ | ||
| 332 | PERF_RECORD_COMM = 3, | ||
| 333 | |||
| 334 | /* | ||
| 335 | * struct { | ||
| 336 | * struct perf_event_header header; | ||
| 337 | * u32 pid, ppid; | ||
| 338 | * u32 tid, ptid; | ||
| 339 | * u64 time; | ||
| 340 | * }; | ||
| 341 | */ | ||
| 342 | PERF_RECORD_EXIT = 4, | ||
| 343 | |||
| 344 | /* | ||
| 345 | * struct { | ||
| 346 | * struct perf_event_header header; | ||
| 347 | * u64 time; | ||
| 348 | * u64 id; | ||
| 349 | * u64 stream_id; | ||
| 350 | * }; | ||
| 351 | */ | ||
| 352 | PERF_RECORD_THROTTLE = 5, | ||
| 353 | PERF_RECORD_UNTHROTTLE = 6, | ||
| 354 | |||
| 355 | /* | ||
| 356 | * struct { | ||
| 357 | * struct perf_event_header header; | ||
| 358 | * u32 pid, ppid; | ||
| 359 | * u32 tid, ptid; | ||
| 360 | * { u64 time; } && PERF_SAMPLE_TIME | ||
| 361 | * }; | ||
| 362 | */ | ||
| 363 | PERF_RECORD_FORK = 7, | ||
| 364 | |||
| 365 | /* | ||
| 366 | * struct { | ||
| 367 | * struct perf_event_header header; | ||
| 368 | * u32 pid, tid; | ||
| 369 | * | ||
| 370 | * struct read_format values; | ||
| 371 | * }; | ||
| 372 | */ | ||
| 373 | PERF_RECORD_READ = 8, | ||
| 374 | |||
| 375 | /* | ||
| 376 | * struct { | ||
| 377 | * struct perf_event_header header; | ||
| 378 | * | ||
| 379 | * { u64 ip; } && PERF_SAMPLE_IP | ||
| 380 | * { u32 pid, tid; } && PERF_SAMPLE_TID | ||
| 381 | * { u64 time; } && PERF_SAMPLE_TIME | ||
| 382 | * { u64 addr; } && PERF_SAMPLE_ADDR | ||
| 383 | * { u64 id; } && PERF_SAMPLE_ID | ||
| 384 | * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID | ||
| 385 | * { u32 cpu, res; } && PERF_SAMPLE_CPU | ||
| 386 | * { u64 period; } && PERF_SAMPLE_PERIOD | ||
| 387 | * | ||
| 388 | * { struct read_format values; } && PERF_SAMPLE_READ | ||
| 389 | * | ||
| 390 | * { u64 nr, | ||
| 391 | * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN | ||
| 392 | * | ||
| 393 | * # | ||
| 394 | * # The RAW record below is opaque data wrt the ABI | ||
| 395 | * # | ||
| 396 | * # That is, the ABI doesn't make any promises wrt to | ||
| 397 | * # the stability of its content, it may vary depending | ||
| 398 | * # on event, hardware, kernel version and phase of | ||
| 399 | * # the moon. | ||
| 400 | * # | ||
| 401 | * # In other words, PERF_SAMPLE_RAW contents are not an ABI. | ||
| 402 | * # | ||
| 403 | * | ||
| 404 | * { u32 size; | ||
| 405 | * char data[size];}&& PERF_SAMPLE_RAW | ||
| 406 | * }; | ||
| 407 | */ | ||
| 408 | PERF_RECORD_SAMPLE = 9, | ||
| 409 | |||
| 410 | PERF_RECORD_MAX, /* non-ABI */ | ||
| 411 | }; | ||
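All record types above share the perf_event_header prefix, and header.size covers the whole record, so a consumer can skip types it does not understand. A minimal walker over an already-copied stretch of the data area (ring-buffer wrap-around and the data_tail update protocol are deliberately left out of this sketch):

#include <stddef.h>
#include <linux/perf_event.h>

static void walk_records(const char *buf, size_t len)
{
        size_t off = 0;

        while (off + sizeof(struct perf_event_header) <= len) {
                const struct perf_event_header *hdr =
                        (const struct perf_event_header *)(buf + off);

                if (!hdr->size || off + hdr->size > len)
                        break;                  /* truncated record */

                switch (hdr->type) {
                case PERF_RECORD_SAMPLE:
                        /* body layout follows attr.sample_type, see above */
                        break;
                case PERF_RECORD_LOST:
                        /* { u64 id; u64 lost; }: records were dropped */
                        break;
                default:
                        break;                  /* unknown types are skipped */
                }
                off += hdr->size;
        }
}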
| 412 | |||
| 413 | enum perf_callchain_context { | ||
| 414 | PERF_CONTEXT_HV = (__u64)-32, | ||
| 415 | PERF_CONTEXT_KERNEL = (__u64)-128, | ||
| 416 | PERF_CONTEXT_USER = (__u64)-512, | ||
| 417 | |||
| 418 | PERF_CONTEXT_GUEST = (__u64)-2048, | ||
| 419 | PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, | ||
| 420 | PERF_CONTEXT_GUEST_USER = (__u64)-2560, | ||
| 421 | |||
| 422 | PERF_CONTEXT_MAX = (__u64)-4095, | ||
| 423 | }; | ||
| 424 | |||
| 425 | #define PERF_FLAG_FD_NO_GROUP (1U << 0) | ||
| 426 | #define PERF_FLAG_FD_OUTPUT (1U << 1) | ||
| 427 | |||
| 428 | #ifdef __KERNEL__ | ||
| 429 | /* | ||
| 430 | * Kernel-internal data types and definitions: | ||
| 431 | */ | ||
| 432 | |||
| 433 | #ifdef CONFIG_PERF_EVENTS | ||
| 434 | # include <asm/perf_event.h> | ||
| 435 | #endif | ||
| 436 | |||
| 437 | #include <linux/list.h> | ||
| 438 | #include <linux/mutex.h> | ||
| 439 | #include <linux/rculist.h> | ||
| 440 | #include <linux/rcupdate.h> | ||
| 441 | #include <linux/spinlock.h> | ||
| 442 | #include <linux/hrtimer.h> | ||
| 443 | #include <linux/fs.h> | ||
| 444 | #include <linux/pid_namespace.h> | ||
| 445 | #include <asm/atomic.h> | ||
| 446 | |||
| 447 | #define PERF_MAX_STACK_DEPTH 255 | ||
| 448 | |||
| 449 | struct perf_callchain_entry { | ||
| 450 | __u64 nr; | ||
| 451 | __u64 ip[PERF_MAX_STACK_DEPTH]; | ||
| 452 | }; | ||
| 453 | |||
| 454 | struct perf_raw_record { | ||
| 455 | u32 size; | ||
| 456 | void *data; | ||
| 457 | }; | ||
| 458 | |||
| 459 | struct task_struct; | ||
| 460 | |||
| 461 | /** | ||
| 462 | * struct hw_perf_event - performance event hardware details: | ||
| 463 | */ | ||
| 464 | struct hw_perf_event { | ||
| 465 | #ifdef CONFIG_PERF_EVENTS | ||
| 466 | union { | ||
| 467 | struct { /* hardware */ | ||
| 468 | u64 config; | ||
| 469 | unsigned long config_base; | ||
| 470 | unsigned long event_base; | ||
| 471 | int idx; | ||
| 472 | }; | ||
| 473 | union { /* software */ | ||
| 474 | atomic64_t count; | ||
| 475 | struct hrtimer hrtimer; | ||
| 476 | }; | ||
| 477 | }; | ||
| 478 | atomic64_t prev_count; | ||
| 479 | u64 sample_period; | ||
| 480 | u64 last_period; | ||
| 481 | atomic64_t period_left; | ||
| 482 | u64 interrupts; | ||
| 483 | |||
| 484 | u64 freq_count; | ||
| 485 | u64 freq_interrupts; | ||
| 486 | u64 freq_stamp; | ||
| 487 | #endif | ||
| 488 | }; | ||
| 489 | |||
| 490 | struct perf_event; | ||
| 491 | |||
| 492 | /** | ||
| 493 | * struct pmu - generic performance monitoring unit | ||
| 494 | */ | ||
| 495 | struct pmu { | ||
| 496 | int (*enable) (struct perf_event *event); | ||
| 497 | void (*disable) (struct perf_event *event); | ||
| 498 | void (*read) (struct perf_event *event); | ||
| 499 | void (*unthrottle) (struct perf_event *event); | ||
| 500 | }; | ||
| 501 | |||
| 502 | /** | ||
| 503 | * enum perf_event_active_state - the states of an event | ||
| 504 | */ | ||
| 505 | enum perf_event_active_state { | ||
| 506 | PERF_EVENT_STATE_ERROR = -2, | ||
| 507 | PERF_EVENT_STATE_OFF = -1, | ||
| 508 | PERF_EVENT_STATE_INACTIVE = 0, | ||
| 509 | PERF_EVENT_STATE_ACTIVE = 1, | ||
| 510 | }; | ||
| 511 | |||
| 512 | struct file; | ||
| 513 | |||
| 514 | struct perf_mmap_data { | ||
| 515 | struct rcu_head rcu_head; | ||
| 516 | int nr_pages; /* nr of data pages */ | ||
| 517 | int writable; /* are we writable */ | ||
| 518 | int nr_locked; /* nr pages mlocked */ | ||
| 519 | |||
| 520 | atomic_t poll; /* POLL_ for wakeups */ | ||
| 521 | atomic_t events; /* event_id limit */ | ||
| 522 | |||
| 523 | atomic_long_t head; /* write position */ | ||
| 524 | atomic_long_t done_head; /* completed head */ | ||
| 525 | |||
| 526 | atomic_t lock; /* concurrent writes */ | ||
| 527 | atomic_t wakeup; /* needs a wakeup */ | ||
| 528 | atomic_t lost; /* nr records lost */ | ||
| 529 | |||
| 530 | long watermark; /* wakeup watermark */ | ||
| 531 | |||
| 532 | struct perf_event_mmap_page *user_page; | ||
| 533 | void *data_pages[0]; | ||
| 534 | }; | ||
| 535 | |||
| 536 | struct perf_pending_entry { | ||
| 537 | struct perf_pending_entry *next; | ||
| 538 | void (*func)(struct perf_pending_entry *); | ||
| 539 | }; | ||
| 540 | |||
| 541 | /** | ||
| 542 | * struct perf_event - performance event kernel representation: | ||
| 543 | */ | ||
| 544 | struct perf_event { | ||
| 545 | #ifdef CONFIG_PERF_EVENTS | ||
| 546 | struct list_head group_entry; | ||
| 547 | struct list_head event_entry; | ||
| 548 | struct list_head sibling_list; | ||
| 549 | int nr_siblings; | ||
| 550 | struct perf_event *group_leader; | ||
| 551 | struct perf_event *output; | ||
| 552 | const struct pmu *pmu; | ||
| 553 | |||
| 554 | enum perf_event_active_state state; | ||
| 555 | atomic64_t count; | ||
| 556 | |||
| 557 | /* | ||
| 558 | * These are the total time in nanoseconds that the event | ||
| 559 | * has been enabled (i.e. eligible to run, and the task has | ||
| 560 | * been scheduled in, if this is a per-task event) | ||
| 561 | * and running (scheduled onto the CPU), respectively. | ||
| 562 | * | ||
| 563 | * They are computed from tstamp_enabled, tstamp_running and | ||
| 564 | * tstamp_stopped when the event is in INACTIVE or ACTIVE state. | ||
| 565 | */ | ||
| 566 | u64 total_time_enabled; | ||
| 567 | u64 total_time_running; | ||
| 568 | |||
| 569 | /* | ||
| 570 | * These are timestamps used for computing total_time_enabled | ||
| 571 | * and total_time_running when the event is in INACTIVE or | ||
| 572 | * ACTIVE state, measured in nanoseconds from an arbitrary point | ||
| 573 | * in time. | ||
| 574 | * tstamp_enabled: the notional time when the event was enabled | ||
| 575 | * tstamp_running: the notional time when the event was scheduled on | ||
| 576 | * tstamp_stopped: in INACTIVE state, the notional time when the | ||
| 577 | * event was scheduled off. | ||
| 578 | */ | ||
| 579 | u64 tstamp_enabled; | ||
| 580 | u64 tstamp_running; | ||
| 581 | u64 tstamp_stopped; | ||
| 582 | |||
| 583 | struct perf_event_attr attr; | ||
| 584 | struct hw_perf_event hw; | ||
| 585 | |||
| 586 | struct perf_event_context *ctx; | ||
| 587 | struct file *filp; | ||
| 588 | |||
| 589 | /* | ||
| 590 | * These accumulate total time (in nanoseconds) that children | ||
| 591 | * events have been enabled and running, respectively. | ||
| 592 | */ | ||
| 593 | atomic64_t child_total_time_enabled; | ||
| 594 | atomic64_t child_total_time_running; | ||
| 595 | |||
| 596 | /* | ||
| 597 | * Protect attach/detach and child_list: | ||
| 598 | */ | ||
| 599 | struct mutex child_mutex; | ||
| 600 | struct list_head child_list; | ||
| 601 | struct perf_event *parent; | ||
| 602 | |||
| 603 | int oncpu; | ||
| 604 | int cpu; | ||
| 605 | |||
| 606 | struct list_head owner_entry; | ||
| 607 | struct task_struct *owner; | ||
| 608 | |||
| 609 | /* mmap bits */ | ||
| 610 | struct mutex mmap_mutex; | ||
| 611 | atomic_t mmap_count; | ||
| 612 | struct perf_mmap_data *data; | ||
| 613 | |||
| 614 | /* poll related */ | ||
| 615 | wait_queue_head_t waitq; | ||
| 616 | struct fasync_struct *fasync; | ||
| 617 | |||
| 618 | /* delayed work for NMIs and such */ | ||
| 619 | int pending_wakeup; | ||
| 620 | int pending_kill; | ||
| 621 | int pending_disable; | ||
| 622 | struct perf_pending_entry pending; | ||
| 623 | |||
| 624 | atomic_t event_limit; | ||
| 625 | |||
| 626 | void (*destroy)(struct perf_event *); | ||
| 627 | struct rcu_head rcu_head; | ||
| 628 | |||
| 629 | struct pid_namespace *ns; | ||
| 630 | u64 id; | ||
| 631 | #endif | ||
| 632 | }; | ||
| 633 | |||
| 634 | /** | ||
| 635 | * struct perf_event_context - event context structure | ||
| 636 | * | ||
| 637 | * Used as a container for task events and CPU events as well: | ||
| 638 | */ | ||
| 639 | struct perf_event_context { | ||
| 640 | /* | ||
| 641 | * Protect the states of the events in the list, | ||
| 642 | * nr_active, and the list: | ||
| 643 | */ | ||
| 644 | spinlock_t lock; | ||
| 645 | /* | ||
| 646 | * Protect the list of events. Locking either mutex or lock | ||
| 647 | * is sufficient to ensure the list doesn't change; to change | ||
| 648 | * the list you need to lock both the mutex and the spinlock. | ||
| 649 | */ | ||
| 650 | struct mutex mutex; | ||
| 651 | |||
| 652 | struct list_head group_list; | ||
| 653 | struct list_head event_list; | ||
| 654 | int nr_events; | ||
| 655 | int nr_active; | ||
| 656 | int is_active; | ||
| 657 | int nr_stat; | ||
| 658 | atomic_t refcount; | ||
| 659 | struct task_struct *task; | ||
| 660 | |||
| 661 | /* | ||
| 662 | * Context clock, runs when context enabled. | ||
| 663 | */ | ||
| 664 | u64 time; | ||
| 665 | u64 timestamp; | ||
| 666 | |||
| 667 | /* | ||
| 668 | * These fields let us detect when two contexts have both | ||
| 669 | * been cloned (inherited) from a common ancestor. | ||
| 670 | */ | ||
| 671 | struct perf_event_context *parent_ctx; | ||
| 672 | u64 parent_gen; | ||
| 673 | u64 generation; | ||
| 674 | int pin_count; | ||
| 675 | struct rcu_head rcu_head; | ||
| 676 | }; | ||
| 677 | |||
| 678 | /** | ||
| 679 | * struct perf_event_cpu_context - per cpu event context structure | ||
| 680 | */ | ||
| 681 | struct perf_cpu_context { | ||
| 682 | struct perf_event_context ctx; | ||
| 683 | struct perf_event_context *task_ctx; | ||
| 684 | int active_oncpu; | ||
| 685 | int max_pertask; | ||
| 686 | int exclusive; | ||
| 687 | |||
| 688 | /* | ||
| 689 | * Recursion avoidance: | ||
| 690 | * | ||
| 691 | * task, softirq, irq, nmi context | ||
| 692 | */ | ||
| 693 | int recursion[4]; | ||
| 694 | }; | ||
| 695 | |||
| 696 | struct perf_output_handle { | ||
| 697 | struct perf_event *event; | ||
| 698 | struct perf_mmap_data *data; | ||
| 699 | unsigned long head; | ||
| 700 | unsigned long offset; | ||
| 701 | int nmi; | ||
| 702 | int sample; | ||
| 703 | int locked; | ||
| 704 | unsigned long flags; | ||
| 705 | }; | ||
| 706 | |||
| 707 | #ifdef CONFIG_PERF_EVENTS | ||
| 708 | |||
| 709 | /* | ||
| 710 | * Set by architecture code: | ||
| 711 | */ | ||
| 712 | extern int perf_max_events; | ||
| 713 | |||
| 714 | extern const struct pmu *hw_perf_event_init(struct perf_event *event); | ||
| 715 | |||
| 716 | extern void perf_event_task_sched_in(struct task_struct *task, int cpu); | ||
| 717 | extern void perf_event_task_sched_out(struct task_struct *task, | ||
| 718 | struct task_struct *next, int cpu); | ||
| 719 | extern void perf_event_task_tick(struct task_struct *task, int cpu); | ||
| 720 | extern int perf_event_init_task(struct task_struct *child); | ||
| 721 | extern void perf_event_exit_task(struct task_struct *child); | ||
| 722 | extern void perf_event_free_task(struct task_struct *task); | ||
| 723 | extern void set_perf_event_pending(void); | ||
| 724 | extern void perf_event_do_pending(void); | ||
| 725 | extern void perf_event_print_debug(void); | ||
| 726 | extern void __perf_disable(void); | ||
| 727 | extern bool __perf_enable(void); | ||
| 728 | extern void perf_disable(void); | ||
| 729 | extern void perf_enable(void); | ||
| 730 | extern int perf_event_task_disable(void); | ||
| 731 | extern int perf_event_task_enable(void); | ||
| 732 | extern int hw_perf_group_sched_in(struct perf_event *group_leader, | ||
| 733 | struct perf_cpu_context *cpuctx, | ||
| 734 | struct perf_event_context *ctx, int cpu); | ||
| 735 | extern void perf_event_update_userpage(struct perf_event *event); | ||
| 736 | |||
| 737 | struct perf_sample_data { | ||
| 738 | u64 type; | ||
| 739 | |||
| 740 | u64 ip; | ||
| 741 | struct { | ||
| 742 | u32 pid; | ||
| 743 | u32 tid; | ||
| 744 | } tid_entry; | ||
| 745 | u64 time; | ||
| 746 | u64 addr; | ||
| 747 | u64 id; | ||
| 748 | u64 stream_id; | ||
| 749 | struct { | ||
| 750 | u32 cpu; | ||
| 751 | u32 reserved; | ||
| 752 | } cpu_entry; | ||
| 753 | u64 period; | ||
| 754 | struct perf_callchain_entry *callchain; | ||
| 755 | struct perf_raw_record *raw; | ||
| 756 | }; | ||
| 757 | |||
| 758 | extern void perf_output_sample(struct perf_output_handle *handle, | ||
| 759 | struct perf_event_header *header, | ||
| 760 | struct perf_sample_data *data, | ||
| 761 | struct perf_event *event); | ||
| 762 | extern void perf_prepare_sample(struct perf_event_header *header, | ||
| 763 | struct perf_sample_data *data, | ||
| 764 | struct perf_event *event, | ||
| 765 | struct pt_regs *regs); | ||
| 766 | |||
| 767 | extern int perf_event_overflow(struct perf_event *event, int nmi, | ||
| 768 | struct perf_sample_data *data, | ||
| 769 | struct pt_regs *regs); | ||
| 770 | |||
| 771 | /* | ||
| 772 | * Return 1 for a software event, 0 for a hardware event | ||
| 773 | */ | ||
| 774 | static inline int is_software_event(struct perf_event *event) | ||
| 775 | { | ||
| 776 | return (event->attr.type != PERF_TYPE_RAW) && | ||
| 777 | (event->attr.type != PERF_TYPE_HARDWARE) && | ||
| 778 | (event->attr.type != PERF_TYPE_HW_CACHE); | ||
| 779 | } | ||
| 780 | |||
| 781 | extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | ||
| 782 | |||
| 783 | extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); | ||
| 784 | |||
| 785 | static inline void | ||
| 786 | perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) | ||
| 787 | { | ||
| 788 | if (atomic_read(&perf_swevent_enabled[event_id])) | ||
| 789 | __perf_sw_event(event_id, nr, nmi, regs, addr); | ||
| 790 | } | ||
| 791 | |||
| 792 | extern void __perf_event_mmap(struct vm_area_struct *vma); | ||
| 793 | |||
| 794 | static inline void perf_event_mmap(struct vm_area_struct *vma) | ||
| 795 | { | ||
| 796 | if (vma->vm_flags & VM_EXEC) | ||
| 797 | __perf_event_mmap(vma); | ||
| 798 | } | ||
| 799 | |||
| 800 | extern void perf_event_comm(struct task_struct *tsk); | ||
| 801 | extern void perf_event_fork(struct task_struct *tsk); | ||
| 802 | |||
| 803 | extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); | ||
| 804 | |||
| 805 | extern int sysctl_perf_event_paranoid; | ||
| 806 | extern int sysctl_perf_event_mlock; | ||
| 807 | extern int sysctl_perf_event_sample_rate; | ||
| 808 | |||
| 809 | extern void perf_event_init(void); | ||
| 810 | extern void perf_tp_event(int event_id, u64 addr, u64 count, | ||
| 811 | void *record, int entry_size); | ||
| 812 | |||
| 813 | #ifndef perf_misc_flags | ||
| 814 | #define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \ | ||
| 815 | PERF_RECORD_MISC_KERNEL) | ||
| 816 | #define perf_instruction_pointer(regs) instruction_pointer(regs) | ||
| 817 | #endif | ||
| 818 | |||
| 819 | extern int perf_output_begin(struct perf_output_handle *handle, | ||
| 820 | struct perf_event *event, unsigned int size, | ||
| 821 | int nmi, int sample); | ||
| 822 | extern void perf_output_end(struct perf_output_handle *handle); | ||
| 823 | extern void perf_output_copy(struct perf_output_handle *handle, | ||
| 824 | const void *buf, unsigned int len); | ||
| 825 | #else | ||
| 826 | static inline void | ||
| 827 | perf_event_task_sched_in(struct task_struct *task, int cpu) { } | ||
| 828 | static inline void | ||
| 829 | perf_event_task_sched_out(struct task_struct *task, | ||
| 830 | struct task_struct *next, int cpu) { } | ||
| 831 | static inline void | ||
| 832 | perf_event_task_tick(struct task_struct *task, int cpu) { } | ||
| 833 | static inline int perf_event_init_task(struct task_struct *child) { return 0; } | ||
| 834 | static inline void perf_event_exit_task(struct task_struct *child) { } | ||
| 835 | static inline void perf_event_free_task(struct task_struct *task) { } | ||
| 836 | static inline void perf_event_do_pending(void) { } | ||
| 837 | static inline void perf_event_print_debug(void) { } | ||
| 838 | static inline void perf_disable(void) { } | ||
| 839 | static inline void perf_enable(void) { } | ||
| 840 | static inline int perf_event_task_disable(void) { return -EINVAL; } | ||
| 841 | static inline int perf_event_task_enable(void) { return -EINVAL; } | ||
| 842 | |||
| 843 | static inline void | ||
| 844 | perf_sw_event(u32 event_id, u64 nr, int nmi, | ||
| 845 | struct pt_regs *regs, u64 addr) { } | ||
| 846 | |||
| 847 | static inline void perf_event_mmap(struct vm_area_struct *vma) { } | ||
| 848 | static inline void perf_event_comm(struct task_struct *tsk) { } | ||
| 849 | static inline void perf_event_fork(struct task_struct *tsk) { } | ||
| 850 | static inline void perf_event_init(void) { } | ||
| 851 | |||
| 852 | #endif | ||
| 853 | |||
| 854 | #define perf_output_put(handle, x) \ | ||
| 855 | perf_output_copy((handle), &(x), sizeof(x)) | ||
| 856 | |||
| 857 | #endif /* __KERNEL__ */ | ||
| 858 | #endif /* _LINUX_PERF_EVENT_H */ | ||
diff --git a/include/linux/prctl.h b/include/linux/prctl.h index b00df4c79c63..07bff666e65b 100644 --- a/include/linux/prctl.h +++ b/include/linux/prctl.h | |||
| @@ -85,7 +85,7 @@ | |||
| 85 | #define PR_SET_TIMERSLACK 29 | 85 | #define PR_SET_TIMERSLACK 29 |
| 86 | #define PR_GET_TIMERSLACK 30 | 86 | #define PR_GET_TIMERSLACK 30 |
| 87 | 87 | ||
| 88 | #define PR_TASK_PERF_COUNTERS_DISABLE 31 | 88 | #define PR_TASK_PERF_EVENTS_DISABLE 31 |
| 89 | #define PR_TASK_PERF_COUNTERS_ENABLE 32 | 89 | #define PR_TASK_PERF_EVENTS_ENABLE 32 |
| 90 | 90 | ||
| 91 | #endif /* _LINUX_PRCTL_H */ | 91 | #endif /* _LINUX_PRCTL_H */ |
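From user space the two renamed options toggle every event attached to the calling task, which is useful for excluding setup or teardown code from a per-task measurement. A small sketch; on toolchains of this era the PR_TASK_PERF_EVENTS_* constants come from <linux/prctl.h> rather than <sys/prctl.h>:

#include <sys/prctl.h>
#include <linux/prctl.h>

static void run_unmeasured(void (*setup)(void))
{
        prctl(PR_TASK_PERF_EVENTS_DISABLE);
        setup();                /* not counted against this task's events */
        prctl(PR_TASK_PERF_EVENTS_ENABLE);
}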
diff --git a/include/linux/sched.h b/include/linux/sched.h index 115af05ecabd..8fe351c3914a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -100,7 +100,7 @@ struct robust_list_head; | |||
| 100 | struct bio; | 100 | struct bio; |
| 101 | struct fs_struct; | 101 | struct fs_struct; |
| 102 | struct bts_context; | 102 | struct bts_context; |
| 103 | struct perf_counter_context; | 103 | struct perf_event_context; |
| 104 | 104 | ||
| 105 | /* | 105 | /* |
| 106 | * List of flags we want to share for kernel threads, | 106 | * List of flags we want to share for kernel threads, |
| @@ -701,7 +701,7 @@ struct user_struct { | |||
| 701 | #endif | 701 | #endif |
| 702 | #endif | 702 | #endif |
| 703 | 703 | ||
| 704 | #ifdef CONFIG_PERF_COUNTERS | 704 | #ifdef CONFIG_PERF_EVENTS |
| 705 | atomic_long_t locked_vm; | 705 | atomic_long_t locked_vm; |
| 706 | #endif | 706 | #endif |
| 707 | }; | 707 | }; |
| @@ -1451,10 +1451,10 @@ struct task_struct { | |||
| 1451 | struct list_head pi_state_list; | 1451 | struct list_head pi_state_list; |
| 1452 | struct futex_pi_state *pi_state_cache; | 1452 | struct futex_pi_state *pi_state_cache; |
| 1453 | #endif | 1453 | #endif |
| 1454 | #ifdef CONFIG_PERF_COUNTERS | 1454 | #ifdef CONFIG_PERF_EVENTS |
| 1455 | struct perf_counter_context *perf_counter_ctxp; | 1455 | struct perf_event_context *perf_event_ctxp; |
| 1456 | struct mutex perf_counter_mutex; | 1456 | struct mutex perf_event_mutex; |
| 1457 | struct list_head perf_counter_list; | 1457 | struct list_head perf_event_list; |
| 1458 | #endif | 1458 | #endif |
| 1459 | #ifdef CONFIG_NUMA | 1459 | #ifdef CONFIG_NUMA |
| 1460 | struct mempolicy *mempolicy; /* Protected by alloc_lock */ | 1460 | struct mempolicy *mempolicy; /* Protected by alloc_lock */ |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 7d9803cbb20f..8d8285a10db9 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
| @@ -55,7 +55,7 @@ struct compat_timeval; | |||
| 55 | struct robust_list_head; | 55 | struct robust_list_head; |
| 56 | struct getcpu_cache; | 56 | struct getcpu_cache; |
| 57 | struct old_linux_dirent; | 57 | struct old_linux_dirent; |
| 58 | struct perf_counter_attr; | 58 | struct perf_event_attr; |
| 59 | 59 | ||
| 60 | #include <linux/types.h> | 60 | #include <linux/types.h> |
| 61 | #include <linux/aio_abi.h> | 61 | #include <linux/aio_abi.h> |
| @@ -877,7 +877,7 @@ asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int, | |||
| 877 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]); | 877 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]); |
| 878 | 878 | ||
| 879 | 879 | ||
| 880 | asmlinkage long sys_perf_counter_open( | 880 | asmlinkage long sys_perf_event_open( |
| 881 | struct perf_counter_attr __user *attr_uptr, | 881 | struct perf_event_attr __user *attr_uptr, |
| 882 | pid_t pid, int cpu, int group_fd, unsigned long flags); | 882 | pid_t pid, int cpu, int group_fd, unsigned long flags); |
| 883 | #endif | 883 | #endif |
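There is no libc wrapper for the renamed syscall, so user space generally goes through syscall(2). The wrapper below mirrors the prototype in the hunk above; the function name is just a local convention, and __NR_perf_event_open comes from the architecture's unistd.h:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                           int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

A call such as perf_event_open(&attr, 0, -1, -1, 0) opens a per-task event on the calling process, schedulable on any CPU; the returned fd is then used with the read(), ioctl() and mmap() interfaces declared in perf_event.h above.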
