Diffstat (limited to 'include/linux/perf_event.h')

 -rw-r--r--  include/linux/perf_event.h | 289

 1 file changed, 228 insertions(+), 61 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8fa71874113f..716f99b682c1 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -203,19 +203,29 @@ struct perf_event_attr {
 				enable_on_exec :  1, /* next exec enables     */
 				task           :  1, /* trace fork/exit       */
 				watermark      :  1, /* wakeup_watermark      */
-
-				__reserved_1   : 49;
+				/*
+				 * precise_ip:
+				 *
+				 *  0 - SAMPLE_IP can have arbitrary skid
+				 *  1 - SAMPLE_IP must have constant skid
+				 *  2 - SAMPLE_IP requested to have 0 skid
+				 *  3 - SAMPLE_IP must have 0 skid
+				 *
+				 *  See also PERF_RECORD_MISC_EXACT_IP
+				 */
+				precise_ip     :  2, /* skid constraint       */
+				mmap_data      :  1, /* non-exec mmap data    */
+
+				__reserved_1   : 46;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
 		__u32		wakeup_watermark; /* bytes before wakeup   */
 	};
 
-	__u32			__reserved_2;
-
-	__u64			bp_addr;
 	__u32			bp_type;
-	__u32			bp_len;
+	__u64			bp_addr;
+	__u64			bp_len;
 };
 
 /*
@@ -289,11 +299,24 @@ struct perf_event_mmap_page {
 	__u64	data_tail;		/* user-space written tail */
 };
 
-#define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
+#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
 #define PERF_RECORD_MISC_KERNEL		(1 << 0)
 #define PERF_RECORD_MISC_USER			(2 << 0)
 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
+#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
+#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)
+
+/*
+ * Indicates that the content of PERF_SAMPLE_IP points to
+ * the actual instruction that triggered the event. See also
+ * perf_event_attr::precise_ip.
+ */
+#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
+/*
+ * Reserve the last bit to indicate some extended misc field
+ */
+#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)
 
 struct perf_event_header {
 	__u32	type;
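Illustration (not from the header): how a consumer of the ring buffer might decode the widened cpumode field and the new exact-IP bit:

	#include <linux/perf_event.h>

	/* Classify one record from its header.misc field. */
	static void classify_record(const struct perf_event_header *hdr)
	{
		__u16 cpumode = hdr->misc & PERF_RECORD_MISC_CPUMODE_MASK;

		if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
		    cpumode == PERF_RECORD_MISC_GUEST_USER) {
			/* sample originated inside a virtualization guest */
		}

		if (hdr->misc & PERF_RECORD_MISC_EXACT_IP) {
			/* PERF_SAMPLE_IP is the instruction that raised
			 * the event, with zero skid */
		}
	}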
@@ -356,8 +379,8 @@ enum perf_event_type {
 	 *	u64				stream_id;
 	 * };
 	 */
-	PERF_RECORD_THROTTLE		= 5,
-	PERF_RECORD_UNTHROTTLE		= 6,
+	PERF_RECORD_THROTTLE			= 5,
+	PERF_RECORD_UNTHROTTLE			= 6,
 
 	/*
 	 * struct {
@@ -371,10 +394,10 @@ enum perf_event_type {
 
 	/*
 	 * struct {
-	 * 	struct perf_event_header	header;
-	 * 	u32				pid, tid;
+	 *	struct perf_event_header	header;
+	 *	u32				pid, tid;
 	 *
-	 * 	struct read_format		values;
+	 *	struct read_format		values;
 	 * };
 	 */
 	PERF_RECORD_READ			= 8,
@@ -412,7 +435,7 @@ enum perf_event_type {
 	 *	  char			data[size];}&& PERF_SAMPLE_RAW
 	 * };
 	 */
-	PERF_RECORD_SAMPLE		= 9,
+	PERF_RECORD_SAMPLE			= 9,
 
 	PERF_RECORD_MAX,			/* non-ABI */
 };
@@ -439,8 +462,15 @@ enum perf_callchain_context {
 
 #ifdef CONFIG_PERF_EVENTS
 # include <asm/perf_event.h>
+# include <asm/local64.h>
 #endif
 
+struct perf_guest_info_callbacks {
+	int (*is_in_guest) (void);
+	int (*is_user_mode) (void);
+	unsigned long (*get_guest_ip) (void);
+};
+
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 #include <asm/hw_breakpoint.h>
 #endif
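A hypervisor module fills in these hooks and registers them so the PMU interrupt handler can attribute guest samples; a sketch, with every my_* name hypothetical (KVM is the in-tree user of this interface):

	#include <linux/init.h>
	#include <linux/perf_event.h>

	static int my_is_in_guest(void)
	{
		return 0;	/* e.g. test a per-cpu "in guest mode" flag */
	}

	static int my_is_user_mode(void)
	{
		return 0;	/* CPL of the interrupted guest context */
	}

	static unsigned long my_get_guest_ip(void)
	{
		return 0;	/* guest instruction pointer saved at VM-exit */
	}

	static struct perf_guest_info_callbacks my_guest_cbs = {
		.is_in_guest	= my_is_in_guest,
		.is_user_mode	= my_is_user_mode,
		.get_guest_ip	= my_get_guest_ip,
	};

	static int __init my_init(void)
	{
		return perf_register_guest_info_callbacks(&my_guest_cbs);
	}

	static void __exit my_exit(void)
	{
		perf_unregister_guest_info_callbacks(&my_guest_cbs);
	}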
@@ -454,7 +484,10 @@ enum perf_callchain_context {
 #include <linux/fs.h>
 #include <linux/pid_namespace.h>
 #include <linux/workqueue.h>
+#include <linux/ftrace.h>
+#include <linux/cpu.h>
 #include <asm/atomic.h>
+#include <asm/local.h>
 
 #define PERF_MAX_STACK_DEPTH		255
 
@@ -468,6 +501,17 @@ struct perf_raw_record {
 	void				*data;
 };
 
+struct perf_branch_entry {
+	__u64				from;
+	__u64				to;
+	__u64				flags;
+};
+
+struct perf_branch_stack {
+	__u64				nr;
+	struct perf_branch_entry	entries[0];
+};
+
 struct task_struct;
 
 /**
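Sketch of a driver-side consumer of the new layout; dump_branch_stack() is hypothetical, assuming a stack captured by PMU driver code:

	#include <linux/kernel.h>
	#include <linux/perf_event.h>

	static void dump_branch_stack(const struct perf_branch_stack *bs)
	{
		__u64 i;

		/* entries[] is a flexible array sized by ->nr */
		for (i = 0; i < bs->nr; i++)
			printk(KERN_DEBUG "branch %llu: %016llx -> %016llx\n",
			       (unsigned long long)i,
			       (unsigned long long)bs->entries[i].from,
			       (unsigned long long)bs->entries[i].to);
	}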
@@ -478,42 +522,75 @@ struct hw_perf_event {
 	union {
 		struct { /* hardware */
 			u64		config;
+			u64		last_tag;
 			unsigned long	config_base;
 			unsigned long	event_base;
 			int		idx;
+			int		last_cpu;
 		};
 		struct { /* software */
 			s64		remaining;
 			struct hrtimer	hrtimer;
 		};
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
-		union { /* breakpoint */
+		struct { /* breakpoint */
 			struct arch_hw_breakpoint	info;
+			struct list_head		bp_list;
 		};
 #endif
 	};
-	atomic64_t			prev_count;
+	local64_t			prev_count;
 	u64				sample_period;
 	u64				last_period;
-	atomic64_t			period_left;
+	local64_t			period_left;
 	u64				interrupts;
 
-	u64				freq_count;
-	u64				freq_interrupts;
-	u64				freq_stamp;
+	u64				freq_time_stamp;
+	u64				freq_count_stamp;
 #endif
 };
 
 struct perf_event;
 
+/*
+ * Common implementation detail of pmu::{start,commit,cancel}_txn
+ */
+#define PERF_EVENT_TXN 0x1
+
 /**
  * struct pmu - generic performance monitoring unit
  */
 struct pmu {
 	int (*enable)			(struct perf_event *event);
 	void (*disable)			(struct perf_event *event);
+	int (*start)			(struct perf_event *event);
+	void (*stop)			(struct perf_event *event);
 	void (*read)			(struct perf_event *event);
 	void (*unthrottle)		(struct perf_event *event);
+
+	/*
+	 * Group events scheduling is treated as a transaction, add group
+	 * events as a whole and perform one schedulability test. If the test
+	 * fails, roll back the whole group.
+	 */
+
+	/*
+	 * Start the transaction, after this ->enable() doesn't need
+	 * to do schedulability tests.
+	 */
+	void (*start_txn)		(const struct pmu *pmu);
+	/*
+	 * If ->start_txn() disabled the ->enable() schedulability test
+	 * then ->commit_txn() is required to perform one. On success
+	 * the transaction is closed. On error the transaction is kept
+	 * open until ->cancel_txn() is called.
+	 */
+	int  (*commit_txn)		(const struct pmu *pmu);
+	/*
+	 * Will cancel the transaction, assumes ->disable() is called
+	 * for each successful ->enable() during the transaction.
+	 */
+	void (*cancel_txn)		(const struct pmu *pmu);
 };
 
 /**
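Sketch of how a PMU driver might implement the three transaction hooks, shaped after the x86 approach: ->enable() skips its per-event schedulability test while PERF_EVENT_TXN is set, and ->commit_txn() runs one test for the whole group. The per-cpu state type and my_schedule_events() are hypothetical stand-ins:

	#include <linux/percpu.h>
	#include <linux/perf_event.h>

	struct my_cpu_pmu {		/* hypothetical per-cpu driver state */
		unsigned int group_flag;
	};
	static DEFINE_PER_CPU(struct my_cpu_pmu, my_cpu_pmu);

	static int my_schedule_events(void)
	{
		/* hypothetical: run the counter-assignment test */
		return 0;
	}

	static void my_pmu_start_txn(const struct pmu *pmu)
	{
		/* tell ->enable() to skip its per-event schedulability test */
		__get_cpu_var(my_cpu_pmu).group_flag |= PERF_EVENT_TXN;
	}

	static int my_pmu_commit_txn(const struct pmu *pmu)
	{
		/* one schedulability test covering the whole group */
		int ret = my_schedule_events();

		if (!ret)
			__get_cpu_var(my_cpu_pmu).group_flag &= ~PERF_EVENT_TXN;
		return ret;	/* on error, core code calls ->cancel_txn() */
	}

	static void my_pmu_cancel_txn(const struct pmu *pmu)
	{
		__get_cpu_var(my_cpu_pmu).group_flag &= ~PERF_EVENT_TXN;
	}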
@@ -528,25 +605,25 @@ enum perf_event_active_state {
 
 struct file;
 
-struct perf_mmap_data {
+#define PERF_BUFFER_WRITABLE		0x01
+
+struct perf_buffer {
+	atomic_t			refcount;
 	struct rcu_head			rcu_head;
 #ifdef CONFIG_PERF_USE_VMALLOC
 	struct work_struct		work;
+	int				page_order;	/* allocation order  */
 #endif
-	int				data_order;
 	int				nr_pages;	/* nr of data pages  */
 	int				writable;	/* are we writable   */
-	int				nr_locked;	/* nr pages mlocked  */
 
 	atomic_t			poll;		/* POLL_ for wakeups */
-	atomic_t			events;		/* event_id limit    */
 
-	atomic_long_t			head;		/* write position    */
-	atomic_long_t			done_head;	/* completed head    */
-
-	atomic_t			lock;		/* concurrent writes */
-	atomic_t			wakeup;		/* needs a wakeup    */
-	atomic_t			lost;		/* nr records lost   */
+	local_t				head;		/* write position    */
+	local_t				nest;		/* nested writers    */
+	local_t				events;		/* event limit       */
+	local_t				wakeup;		/* wakeup stamp      */
+	local_t				lost;		/* nr records lost   */
 
 	long				watermark;	/* wakeup watermark  */
 
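A simplified userspace reader for a writable mapping (PERF_BUFFER_WRITABLE): the kernel stops overwriting unread data until user-space advances data_tail. This sketch ignores records that wrap past the end of the buffer, and uses a GCC builtin where a production reader needs a proper load barrier:

	#include <linux/perf_event.h>

	/* pg points at the mmap'ed control page, data at the data area of
	 * data_size bytes (a power of two). */
	static void drain_ring(struct perf_event_mmap_page *pg,
			       char *data, __u64 data_size)
	{
		__u64 head = pg->data_head;

		__sync_synchronize();	/* stand-in for the required rmb */

		while (pg->data_tail != head) {
			struct perf_event_header *hdr =
				(struct perf_event_header *)
				(data + (pg->data_tail & (data_size - 1)));

			/* ... consume hdr->size bytes; a full reader must
			 * copy out records that wrap the ring edge ... */

			pg->data_tail += hdr->size; /* frees space for kernel */
		}
	}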
@@ -565,6 +642,21 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
 					struct perf_sample_data *,
 					struct pt_regs *regs);
 
+enum perf_group_flag {
+	PERF_GROUP_SOFTWARE = 0x1,
+};
+
+#define SWEVENT_HLIST_BITS		8
+#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)
+
+struct swevent_hlist {
+	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
+	struct rcu_head			rcu_head;
+};
+
+#define PERF_ATTACH_CONTEXT	0x01
+#define PERF_ATTACH_GROUP	0x02
+
 /**
  * struct perf_event - performance event kernel representation:
  */
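An approximation of how software events are bucketed into this 256-entry table; the kernel's real hash lives in kernel/perf_event.c, so this shows only the shape of it:

	#include <linux/hash.h>
	#include <linux/perf_event.h>

	static struct hlist_head *
	swevent_bucket(struct swevent_hlist *hlist, u64 type, u32 event_id)
	{
		/* combine event type and id into one key, then fold it
		 * down to SWEVENT_HLIST_BITS bits */
		u64 val = event_id | (type << 32);

		return &hlist->heads[hash_64(val, SWEVENT_HLIST_BITS)];
	}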
@@ -573,13 +665,16 @@ struct perf_event {
 	struct list_head		group_entry;
 	struct list_head		event_entry;
 	struct list_head		sibling_list;
+	struct hlist_node		hlist_entry;
 	int				nr_siblings;
+	int				group_flags;
 	struct perf_event		*group_leader;
-	struct perf_event		*output;
 	const struct pmu		*pmu;
 
 	enum perf_event_active_state	state;
+	unsigned int			attach_state;
-	atomic64_t			count;
+	local64_t			count;
+	atomic64_t			child_count;
 
 	/*
 	 * These are the total time in nanoseconds that the event
@@ -636,7 +731,9 @@ struct perf_event {
 	/* mmap bits */
 	struct mutex			mmap_mutex;
 	atomic_t			mmap_count;
-	struct perf_mmap_data		*data;
+	int				mmap_locked;
+	struct user_struct		*mmap_user;
+	struct perf_buffer		*buffer;
 
 	/* poll related */
 	wait_queue_head_t		waitq;
@@ -658,7 +755,8 @@ struct perf_event {
 
 	perf_overflow_handler_t		overflow_handler;
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_EVENT_TRACING
+	struct ftrace_event_call	*tp_event;
 	struct event_filter		*filter;
 #endif
 
@@ -683,7 +781,8 @@ struct perf_event_context {
 	 */
 	struct mutex			mutex;
 
-	struct list_head		group_list;
+	struct list_head		pinned_groups;
+	struct list_head		flexible_groups;
 	struct list_head		event_list;
 	int				nr_events;
 	int				nr_active;
@@ -718,6 +817,9 @@ struct perf_cpu_context {
 	int				active_oncpu;
 	int				max_pertask;
 	int				exclusive;
+	struct swevent_hlist		*swevent_hlist;
+	struct mutex			hlist_mutex;
+	int				hlist_refcount;
 
 	/*
 	 * Recursion avoidance:
@@ -729,12 +831,13 @@ struct perf_cpu_context {
 
 struct perf_output_handle {
 	struct perf_event		*event;
-	struct perf_mmap_data		*data;
-	unsigned long			head;
-	unsigned long			offset;
+	struct perf_buffer		*buffer;
+	unsigned long			wakeup;
+	unsigned long			size;
+	void				*addr;
+	int				page;
 	int				nmi;
 	int				sample;
-	int				locked;
 };
 
 #ifdef CONFIG_PERF_EVENTS
@@ -746,10 +849,9 @@ extern int perf_max_events;
 
 extern const struct pmu *hw_perf_event_init(struct perf_event *event);
 
-extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
-extern void perf_event_task_sched_out(struct task_struct *task,
-					struct task_struct *next, int cpu);
-extern void perf_event_task_tick(struct task_struct *task, int cpu);
+extern void perf_event_task_sched_in(struct task_struct *task);
+extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+extern void perf_event_task_tick(struct task_struct *task);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -762,9 +864,6 @@ extern void perf_disable(void);
 extern void perf_enable(void);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
-extern int hw_perf_group_sched_in(struct perf_event *group_leader,
-	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx, int cpu);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
@@ -796,6 +895,13 @@ struct perf_sample_data {
 	struct perf_raw_record		*raw;
 };
 
+static inline
+void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
+{
+	data->addr = addr;
+	data->raw  = NULL;
+}
+
 extern void perf_output_sample(struct perf_output_handle *handle,
 			       struct perf_event_header *header,
 			       struct perf_sample_data *data,
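Sketch of a hypothetical PMU interrupt path using the new initializer before handing off to perf_event_overflow(), which this header declares elsewhere:

	#include <linux/perf_event.h>

	static void my_pmu_handle_overflow(struct perf_event *event,
					   struct pt_regs *regs)
	{
		struct perf_sample_data data;

		perf_sample_data_init(&data, 0);	/* sets ->addr, clears ->raw */
		data.period = event->hw.last_period;

		if (perf_event_overflow(event, 1 /* nmi */, &data, regs)) {
			/* ... PMU-specific: throttle/stop this counter ... */
		}
	}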
@@ -828,21 +934,45 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
+#ifndef perf_arch_fetch_caller_regs
 static inline void
-perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
+#endif
+
+/*
+ * Take a snapshot of the regs. Skip ip and frame pointer to
+ * the nth caller. We only need a few of the regs:
+ * - ip for PERF_SAMPLE_IP
+ * - cs for user_mode() tests
+ * - bp for callchains
+ * - eflags, for future purposes, just in case
+ */
+static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 {
-	if (atomic_read(&perf_swevent_enabled[event_id]))
-		__perf_sw_event(event_id, nr, nmi, regs, addr);
-}
+	memset(regs, 0, sizeof(*regs));
 
-extern void __perf_event_mmap(struct vm_area_struct *vma);
+	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
+}
 
-static inline void perf_event_mmap(struct vm_area_struct *vma)
+static inline void
+perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 {
-	if (vma->vm_flags & VM_EXEC)
-		__perf_event_mmap(vma);
+	if (atomic_read(&perf_swevent_enabled[event_id])) {
+		struct pt_regs hot_regs;
+
+		if (!regs) {
+			perf_fetch_caller_regs(&hot_regs);
+			regs = &hot_regs;
+		}
+		__perf_sw_event(event_id, nr, nmi, regs, addr);
+	}
 }
 
+extern void perf_event_mmap(struct vm_area_struct *vma);
+extern struct perf_guest_info_callbacks *perf_guest_cbs;
+extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+
 extern void perf_event_comm(struct task_struct *tsk);
 extern void perf_event_fork(struct task_struct *tsk);
 
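With the NULL-regs handling above, a call site no longer needs a pt_regs of its own; for example, counting an emulation fault from ordinary (non-NMI) kernel context:

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, NULL, 0);

perf_sw_event() snapshots the caller's registers itself via perf_fetch_caller_regs(), and only when the event is actually enabled.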
@@ -852,9 +982,25 @@ extern int sysctl_perf_event_paranoid;
 extern int sysctl_perf_event_mlock;
 extern int sysctl_perf_event_sample_rate;
 
+static inline bool perf_paranoid_tracepoint_raw(void)
+{
+	return sysctl_perf_event_paranoid > -1;
+}
+
+static inline bool perf_paranoid_cpu(void)
+{
+	return sysctl_perf_event_paranoid > 0;
+}
+
+static inline bool perf_paranoid_kernel(void)
+{
+	return sysctl_perf_event_paranoid > 1;
+}
+
 extern void perf_event_init(void);
-extern void perf_tp_event(int event_id, u64 addr, u64 count,
-			  void *record, int entry_size);
+extern void perf_tp_event(u64 addr, u64 count, void *record,
+			  int entry_size, struct pt_regs *regs,
+			  struct hlist_head *head, int rctx);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
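A simplified version of the permission checks the perf_event_open() path builds from these helpers (the wrapper function is hypothetical):

	#include <linux/capability.h>
	#include <linux/perf_event.h>

	static int check_attr_permissions(const struct perf_event_attr *attr,
					  int cpu)
	{
		/* per-cpu counters see every task on that CPU */
		if (perf_paranoid_cpu() && cpu != -1 && !capable(CAP_SYS_ADMIN))
			return -EACCES;

		/* kernel-side profiling needs the higher privilege level */
		if (perf_paranoid_kernel() && !attr->exclude_kernel &&
		    !capable(CAP_SYS_ADMIN))
			return -EACCES;

		return 0;
	}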
@@ -875,12 +1021,12 @@ extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
 #else
 static inline void
-perf_event_task_sched_in(struct task_struct *task, int cpu)		{ }
+perf_event_task_sched_in(struct task_struct *task)			{ }
 static inline void
 perf_event_task_sched_out(struct task_struct *task,
-			    struct task_struct *next, int cpu)		{ }
+			  struct task_struct *next)			{ }
 static inline void
-perf_event_task_tick(struct task_struct *task, int cpu)		{ }
+perf_event_task_tick(struct task_struct *task)				{ }
 static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)	{ }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
@@ -895,13 +1041,18 @@ static inline void
 perf_sw_event(u32 event_id, u64 nr, int nmi,
 	      struct pt_regs *regs, u64 addr)			{ }
 static inline void
 perf_bp_event(struct perf_event *event, void *data)		{ }
+
+static inline int perf_register_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks)			{ return 0; }
+static inline int perf_unregister_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks)			{ return 0; }
 
 static inline void perf_event_mmap(struct vm_area_struct *vma)	{ }
 static inline void perf_event_comm(struct task_struct *tsk)	{ }
 static inline void perf_event_fork(struct task_struct *tsk)	{ }
 static inline void perf_event_init(void)			{ }
 static inline int  perf_swevent_get_recursion_context(void)	{ return -1; }
 static inline void perf_swevent_put_recursion_context(int rctx)	{ }
 static inline void perf_event_enable(struct perf_event *event)	{ }
 static inline void perf_event_disable(struct perf_event *event)	{ }
@@ -910,5 +1061,21 @@ static inline void perf_event_disable(struct perf_event *event)	{ }
 #define perf_output_put(handle, x) \
 	perf_output_copy((handle), &(x), sizeof(x))
 
+/*
+ * This has to have a higher priority than migration_notifier in sched.c.
+ */
+#define perf_cpu_notifier(fn)						\
+do {									\
+	static struct notifier_block fn##_nb __cpuinitdata =		\
+		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
+	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
+		(void *)(unsigned long)smp_processor_id());		\
+	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
+		(void *)(unsigned long)smp_processor_id());		\
+	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
+		(void *)(unsigned long)smp_processor_id());		\
+	register_cpu_notifier(&fn##_nb);				\
+} while (0)
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_PERF_EVENT_H */
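Sketch of a PMU driver using the new macro; my_cpu_notify and my_driver_init are hypothetical. At init time the macro replays UP_PREPARE, STARTING and ONLINE for the current CPU, then registers the block for future hotplug transitions:

	#include <linux/cpu.h>
	#include <linux/init.h>
	#include <linux/notifier.h>

	static int __cpuinit
	my_cpu_notify(struct notifier_block *self, unsigned long action,
		      void *hcpu)
	{
		unsigned int cpu = (long)hcpu;

		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_UP_PREPARE:
			/* allocate per-cpu state for this cpu */
			break;
		case CPU_ONLINE:
			/* enable the PMU on this cpu */
			break;
		}
		return NOTIFY_OK;
	}

	static int __init my_driver_init(void)
	{
		perf_cpu_notifier(my_cpu_notify);
		return 0;
	}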
