Diffstat (limited to 'include/linux/perf_counter.h')
 include/linux/perf_counter.h | 60
 1 file changed, 48 insertions(+), 12 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 89698d8aba5c..e604e6ef72dd 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -120,8 +120,9 @@ enum perf_counter_sample_format {
 	PERF_SAMPLE_ID				= 1U << 6,
 	PERF_SAMPLE_CPU				= 1U << 7,
 	PERF_SAMPLE_PERIOD			= 1U << 8,
+	PERF_SAMPLE_STREAM_ID			= 1U << 9,
 
-	PERF_SAMPLE_MAX = 1U << 9,		/* non-ABI */
+	PERF_SAMPLE_MAX = 1U << 10,		/* non-ABI */
 };
 
 /*
@@ -178,8 +179,11 @@ struct perf_counter_attr {
 				mmap           :  1, /* include mmap data     */
 				comm           :  1, /* include comm data     */
 				freq           :  1, /* use freq, not period  */
+				inherit_stat   :  1, /* per task counts       */
+				enable_on_exec :  1, /* next exec enables     */
+				task           :  1, /* trace fork/exit       */
 
-				__reserved_1   : 53;
+				__reserved_1   : 50;
 
 	__u32			wakeup_events;	/* wakeup every n events */
 	__u32			__reserved_2;
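The three new attribute bits matter most to tools that profile a freshly exec()ed child: with disabled set together with the new enable_on_exec bit, the counter can be created before fork()/exec() but only starts counting once the target program actually begins. A minimal sketch of that attribute setup follows; the hardware event chosen (CPU cycles) and the field values other than the new bit are ordinary examples, not requirements, and obtaining the fd via the perf_counter_open syscall is not shown.

```c
#include <string.h>
#include <linux/perf_counter.h>

/*
 * Sketch only: configure a counter that is created before fork()/exec()
 * but starts counting when the child calls exec().
 */
static void setup_exec_counter(struct perf_counter_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->type           = PERF_TYPE_HARDWARE;
	attr->config         = PERF_COUNT_HW_CPU_CYCLES;
	attr->inherit        = 1;	/* follow into children */
	attr->disabled       = 1;	/* do not count in the parent */
	attr->enable_on_exec = 1;	/* next exec enables (new bit above) */
}
```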
@@ -232,6 +236,14 @@ struct perf_counter_mmap_page {
 	__u32	lock;			/* seqlock for synchronization */
 	__u32	index;			/* hardware counter identifier */
 	__s64	offset;			/* add to hardware counter value */
+	__u64	time_enabled;		/* time counter active */
+	__u64	time_running;		/* time counter on cpu */
+
+	/*
+	 * Hole for extension of the self monitor capabilities
+	 */
+
+	__u64	__reserved[123];	/* align to 1k */
 
 	/*
 	 * Control data for the mmap() data buffer.
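The two new timestamps, together with the existing lock/index/offset fields, are what a self-monitoring task would use to read and scale its own counter straight from the mmap()ed page. Below is a minimal userspace sketch of that read loop under stated assumptions: the page is already mapped, the leading version fields come from the full struct definition (not shown in this hunk), and read_hw_counter() is a placeholder for however the raw hardware counter selected by index is read (e.g. rdpmc on x86), not a function from this header.

```c
#include <stdint.h>

/* Mirrors the leading fields of struct perf_counter_mmap_page above;
 * only the members this sketch uses are listed. */
struct counter_page {
	uint32_t version;
	uint32_t compat_version;
	uint32_t lock;		/* seqlock for synchronization */
	uint32_t index;		/* hardware counter identifier */
	int64_t  offset;	/* add to hardware counter value */
	uint64_t time_enabled;	/* time counter active */
	uint64_t time_running;	/* time counter on cpu */
};

/* Placeholder, not part of the header: read the raw hardware counter
 * selected by 'index', e.g. via rdpmc on x86. */
extern uint64_t read_hw_counter(uint32_t index);

/* Self-monitoring read: combine the raw hardware count with the kernel's
 * offset, retrying if the kernel updated the page mid-read, then scale by
 * time_enabled/time_running to compensate for counter multiplexing. */
static uint64_t read_scaled_count(volatile struct counter_page *pg)
{
	uint64_t count, enabled, running;
	uint32_t seq;

	do {
		do {
			seq = pg->lock;
		} while (seq & 1);	/* kernel update in progress */

		count   = pg->offset + read_hw_counter(pg->index);
		enabled = pg->time_enabled;
		running = pg->time_running;
	} while (pg->lock != seq);	/* real code also needs read barriers */

	/* Scale up when the counter was not on the CPU the whole time;
	 * plain integer math here, so very large counts could overflow. */
	if (running && running < enabled)
		count = count * enabled / running;

	return count;
}
```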
@@ -253,7 +265,6 @@ struct perf_counter_mmap_page {
 #define PERF_EVENT_MISC_KERNEL		(1 << 0)
 #define PERF_EVENT_MISC_USER		(2 << 0)
 #define PERF_EVENT_MISC_HYPERVISOR	(3 << 0)
-#define PERF_EVENT_MISC_OVERFLOW	(1 << 2)
 
 struct perf_event_header {
 	__u32	type;
@@ -301,18 +312,18 @@ enum perf_event_type {
 	/*
 	 * struct {
 	 *	struct perf_event_header	header;
-	 *	u64				time;
-	 *	u64				id;
-	 *	u64				sample_period;
+	 *	u32				pid, ppid;
+	 *	u32				tid, ptid;
 	 * };
 	 */
-	PERF_EVENT_PERIOD		= 4,
+	PERF_EVENT_EXIT			= 4,
 
 	/*
 	 * struct {
 	 *	struct perf_event_header	header;
 	 *	u64				time;
 	 *	u64				id;
+	 *	u64				stream_id;
 	 * };
 	 */
 	PERF_EVENT_THROTTLE		= 5,
@@ -322,14 +333,24 @@ enum perf_event_type {
 	 * struct {
 	 *	struct perf_event_header	header;
 	 *	u32				pid, ppid;
+	 *	u32				tid, ptid;
 	 * };
 	 */
 	PERF_EVENT_FORK			= 7,
 
 	/*
-	 * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
-	 * will be PERF_SAMPLE_*
-	 *
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u32				pid, tid;
+	 *	u64				value;
+	 *	{ u64		time_enabled;	} && PERF_FORMAT_ENABLED
+	 *	{ u64		time_running;	} && PERF_FORMAT_RUNNING
+	 *	{ u64		parent_id;	} && PERF_FORMAT_ID
+	 * };
+	 */
+	PERF_EVENT_READ			= 8,
+
+	/*
 	 * struct {
 	 *	struct perf_event_header	header;
 	 *
@@ -337,8 +358,10 @@ enum perf_event_type {
 	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
 	 *	{ u64			time;     } && PERF_SAMPLE_TIME
 	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
-	 *	{ u64			config;   } && PERF_SAMPLE_CONFIG
+	 *	{ u64			id;       } && PERF_SAMPLE_ID
+	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
 	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
+	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
 	 *
 	 *	{ u64			nr;
 	 *	  { u64 id, val; }	cnt[nr];  } && PERF_SAMPLE_GROUP
@@ -347,6 +370,9 @@ enum perf_event_type {
 	 *	u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
 	 * };
 	 */
+	PERF_EVENT_SAMPLE		= 9,
+
+	PERF_EVENT_MAX,			/* non-ABI */
 };
 
 enum perf_callchain_context {
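With PERF_EVENT_MISC_OVERFLOW removed, overflow samples now arrive as their own record type, PERF_EVENT_SAMPLE, and counter values emitted on task exit or inherit-stat paths show up as PERF_EVENT_READ records. The comment blocks above describe the layouts only informally; as an illustrative translation (this struct is not defined in the header), a PERF_EVENT_READ record with all three optional read_format fields requested would look roughly like this:

```c
#include <linux/perf_counter.h>	/* struct perf_event_header, __u32/__u64 */

/*
 * Illustrative only: the byte layout of one PERF_EVENT_READ record for
 * the specific case where enabled time, running time and the parent id
 * were all requested via the counter's read_format.  Drop the matching
 * member for any format bit that was not set.
 */
struct perf_event_read_record {
	struct perf_event_header	header;		/* header.type == PERF_EVENT_READ */
	__u32				pid, tid;	/* task the count belongs to */
	__u64				value;		/* counter value */
	__u64				time_enabled;	/* && PERF_FORMAT_ENABLED */
	__u64				time_running;	/* && PERF_FORMAT_RUNNING */
	__u64				parent_id;	/* && PERF_FORMAT_ID */
};
```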
@@ -582,6 +608,7 @@ struct perf_counter_context {
 	int			nr_counters;
 	int			nr_active;
 	int			is_active;
+	int			nr_stat;
 	atomic_t		refcount;
 	struct task_struct	*task;
 
@@ -669,7 +696,16 @@ static inline int is_software_counter(struct perf_counter *counter)
 		(counter->attr.type != PERF_TYPE_HW_CACHE);
 }
 
-extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
+extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
+
+extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
+
+static inline void
+perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+{
+	if (atomic_read(&perf_swcounter_enabled[event]))
+		__perf_swcounter_event(event, nr, nmi, regs, addr);
+}
 
 extern void __perf_counter_mmap(struct vm_area_struct *vma);
 
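The last hunk turns perf_swcounter_event() into an inline fast path: call sites stay exactly as they were, but when no software counter of the given type is active anywhere, the per-event counter in perf_swcounter_enabled[] is zero and the call costs one atomic_read() instead of a function call into the counter code. A hedged sketch of an unchanged call site (the event id and arguments are illustrative, not taken from a specific caller):

```c
/* Illustrative call site: count one occurrence of a software event.
 * The event id, nmi flag and regs/addr arguments depend on the site;
 * PERF_COUNT_SW_CONTEXT_SWITCHES is used here only as an example. */
perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0, NULL, 0);
```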
