author    Ingo Molnar <mingo@elte.hu>    2009-06-11 08:44:26 -0400
committer Ingo Molnar <mingo@elte.hu>    2009-06-11 11:54:42 -0400
commit    a308444ceb576d3089f9ca0dfd097eba6f1e623f (patch)
tree      2cff57810e3362c829fab5a42cd0afafb4500aad
parent    8be6e8f3c3a13900169f1141870562d0c723b010 (diff)
perf_counter: Better align code
Whitespace and comment bits. Also update copyrights.

[ Impact: cleanup ]

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
-rw-r--r--    include/linux/perf_counter.h | 165
1 file changed, 85 insertions(+), 80 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 20cf5af27ade..1fa1a26cb1b3 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -1,12 +1,13 @@
 /*
  * Performance counters:
  *
- *  Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
- *  Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
+ *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
+ *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
+ *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
  *
  * Data type definitions, declarations, prototypes.
  *
  * Started by: Thomas Gleixner and Ingo Molnar
  *
  * For licencing details see kernel-base/COPYING
  */
@@ -25,18 +26,19 @@
  * attr.type
  */
 enum perf_type_id {
 	PERF_TYPE_HARDWARE		= 0,
 	PERF_TYPE_SOFTWARE		= 1,
 	PERF_TYPE_TRACEPOINT		= 2,
 	PERF_TYPE_HW_CACHE		= 3,
 	PERF_TYPE_RAW			= 4,
 
-	PERF_TYPE_MAX,			/* non ABI */
+	PERF_TYPE_MAX,			/* non-ABI */
 };
 
 /*
- * Generalized performance counter event types, used by the attr.event_id
- * parameter of the sys_perf_counter_open() syscall:
+ * Generalized performance counter event types, used by the
+ * attr.event_id parameter of the sys_perf_counter_open()
+ * syscall:
  */
 enum perf_hw_id {
 	/*
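
A usage sketch (not part of the patch): attr.type selects one of the
perf_type_id values above and attr.config a matching event id. The
perf_counter_attr layout, the PERF_COUNT_HW_CPU_CYCLES id and the
__NR_perf_counter_open syscall number come from elsewhere in this
era's tree and are assumptions here:

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_counter.h>

    /* Sketch: count CPU cycles in the calling task, on any CPU. */
    static int open_cycle_counter(void)
    {
            struct perf_counter_attr attr;          /* assumed layout */

            memset(&attr, 0, sizeof(attr));
            attr.type   = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES; /* assumed id */

            /* pid 0 = self, cpu -1 = any, no group, no flags */
            return syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
    }
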
@@ -50,7 +52,7 @@ enum perf_hw_id {
 	PERF_COUNT_HW_BRANCH_MISSES	= 5,
 	PERF_COUNT_HW_BUS_CYCLES	= 6,
 
-	PERF_COUNT_HW_MAX,		/* non ABI */
+	PERF_COUNT_HW_MAX,		/* non-ABI */
 };
 
 /*
@@ -61,29 +63,29 @@ enum perf_hw_id {
  * { accesses, misses }
  */
 enum perf_hw_cache_id {
 	PERF_COUNT_HW_CACHE_L1D		= 0,
 	PERF_COUNT_HW_CACHE_L1I		= 1,
 	PERF_COUNT_HW_CACHE_LL		= 2,
 	PERF_COUNT_HW_CACHE_DTLB	= 3,
 	PERF_COUNT_HW_CACHE_ITLB	= 4,
 	PERF_COUNT_HW_CACHE_BPU		= 5,
 
-	PERF_COUNT_HW_CACHE_MAX,	/* non ABI */
+	PERF_COUNT_HW_CACHE_MAX,	/* non-ABI */
 };
 
 enum perf_hw_cache_op_id {
 	PERF_COUNT_HW_CACHE_OP_READ	= 0,
 	PERF_COUNT_HW_CACHE_OP_WRITE	= 1,
 	PERF_COUNT_HW_CACHE_OP_PREFETCH	= 2,
 
-	PERF_COUNT_HW_CACHE_OP_MAX,	/* non ABI */
+	PERF_COUNT_HW_CACHE_OP_MAX,	/* non-ABI */
 };
 
 enum perf_hw_cache_op_result_id {
 	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
 	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,
 
-	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non ABI */
+	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
 };
 
 /*
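
The three enums above pick a { cache, op, result } tuple. A sketch of
the packing, assuming the one-byte-per-field attr.config encoding this
header documents outside this hunk:

    /* Sketch: L1 data-cache read misses, used with
     * attr.type == PERF_TYPE_HW_CACHE. The 8-bits-per-field
     * packing is an assumption here.
     */
    __u64 config = (PERF_COUNT_HW_CACHE_L1D         <<  0) |
                   (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
                   (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
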
@@ -93,15 +95,15 @@ enum perf_hw_cache_op_result_id {
  * well):
  */
 enum perf_sw_ids {
 	PERF_COUNT_SW_CPU_CLOCK		= 0,
 	PERF_COUNT_SW_TASK_CLOCK	= 1,
 	PERF_COUNT_SW_PAGE_FAULTS	= 2,
 	PERF_COUNT_SW_CONTEXT_SWITCHES	= 3,
 	PERF_COUNT_SW_CPU_MIGRATIONS	= 4,
 	PERF_COUNT_SW_PAGE_FAULTS_MIN	= 5,
 	PERF_COUNT_SW_PAGE_FAULTS_MAJ	= 6,
 
-	PERF_COUNT_SW_MAX,		/* non ABI */
+	PERF_COUNT_SW_MAX,		/* non-ABI */
 };
 
 /*
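
These ids pair with PERF_TYPE_SOFTWARE and need no PMU hardware; a
two-line sketch (the perf_counter_attr fields are again assumed):

    /* Sketch: count context switches of the monitored task. */
    attr.type   = PERF_TYPE_SOFTWARE;
    attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;
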
@@ -109,15 +111,15 @@ enum perf_sw_ids {
  * in the overflow packets.
  */
 enum perf_counter_sample_format {
 	PERF_SAMPLE_IP			= 1U << 0,
 	PERF_SAMPLE_TID			= 1U << 1,
 	PERF_SAMPLE_TIME		= 1U << 2,
 	PERF_SAMPLE_ADDR		= 1U << 3,
 	PERF_SAMPLE_GROUP		= 1U << 4,
 	PERF_SAMPLE_CALLCHAIN		= 1U << 5,
 	PERF_SAMPLE_ID			= 1U << 6,
 	PERF_SAMPLE_CPU			= 1U << 7,
 	PERF_SAMPLE_PERIOD		= 1U << 8,
 };
 
 /*
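
Each bit above requests one field in the overflow packets. A sketch,
assuming this era's sample_type/sample_period attr members:

    /* Sketch: record ip, tid and a timestamp every 100000 events. */
    attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
                         PERF_SAMPLE_TIME;
    attr.sample_period = 100000;
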
@@ -126,9 +128,9 @@ enum perf_counter_sample_format {
  * in increasing order of bit value, after the counter value.
  */
 enum perf_counter_read_format {
 	PERF_FORMAT_TOTAL_TIME_ENABLED	= 1U << 0,
 	PERF_FORMAT_TOTAL_TIME_RUNNING	= 1U << 1,
 	PERF_FORMAT_ID			= 1U << 2,
 };
 
 /*
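
Per the comment above, read() returns the counter value first, then
each requested field in increasing bit-value order. A sketch of the
resulting layout when all three bits are set (the struct name is
illustrative only):

    /* Sketch: byte layout of a read() with all format bits set. */
    struct read_layout {
            __u64   value;          /* always first */
            __u64   time_enabled;   /* PERF_FORMAT_TOTAL_TIME_ENABLED */
            __u64   time_running;   /* PERF_FORMAT_TOTAL_TIME_RUNNING */
            __u64   id;             /* PERF_FORMAT_ID */
    };
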
@@ -229,12 +231,12 @@ struct perf_counter_mmap_page {
 	__u64	data_head;		/* head in the data section */
 };
 
 #define PERF_EVENT_MISC_CPUMODE_MASK		(3 << 0)
 #define PERF_EVENT_MISC_CPUMODE_UNKNOWN		(0 << 0)
 #define PERF_EVENT_MISC_KERNEL			(1 << 0)
 #define PERF_EVENT_MISC_USER			(2 << 0)
 #define PERF_EVENT_MISC_HYPERVISOR		(3 << 0)
 #define PERF_EVENT_MISC_OVERFLOW		(1 << 2)
 
 struct perf_event_header {
 	__u32	type;
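
A decoding sketch for the misc bits; the __u16 misc member of
perf_event_header is cut off by this hunk's boundary and assumed here:

    /* Sketch: was this event recorded in kernel mode? */
    static inline int perf_misc_is_kernel(__u16 misc)
    {
            return (misc & PERF_EVENT_MISC_CPUMODE_MASK) ==
                   PERF_EVENT_MISC_KERNEL;
    }
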
@@ -351,14 +353,14 @@ struct hw_perf_counter {
 #ifdef CONFIG_PERF_COUNTERS
 	union {
 		struct { /* hardware */
 			u64		config;
 			unsigned long	config_base;
 			unsigned long	counter_base;
 			int		idx;
 		};
 		union { /* software */
 			atomic64_t	count;
 			struct hrtimer	hrtimer;
 		};
 	};
 	atomic64_t			prev_count;
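
The anonymous union above overlays hardware state (config, config_base,
counter_base, idx) with software state (count or hrtimer); no tag field
records which half is live, the counter's attr.type decides. A sketch
(is_software_counter() is an assumed helper from this era's perf code):

    /* Sketch: pick the live half of the union by counter type. */
    if (is_software_counter(counter))
            val = atomic64_read(&counter->hw.count);  /* software */
    else
            idx = counter->hw.idx;                    /* hardware */
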
@@ -523,37 +525,37 @@ struct perf_counter_context {
 	 * Protect the states of the counters in the list,
 	 * nr_active, and the list:
 	 */
 	spinlock_t		lock;
 	/*
 	 * Protect the list of counters.  Locking either mutex or lock
 	 * is sufficient to ensure the list doesn't change; to change
 	 * the list you need to lock both the mutex and the spinlock.
 	 */
 	struct mutex		mutex;
 
 	struct list_head	counter_list;
 	struct list_head	event_list;
 	int			nr_counters;
 	int			nr_active;
 	int			is_active;
 	atomic_t		refcount;
 	struct task_struct	*task;
 
 	/*
 	 * Context clock, runs when context enabled.
 	 */
 	u64			time;
 	u64			timestamp;
 
 	/*
 	 * These fields let us detect when two contexts have both
 	 * been cloned (inherited) from a common ancestor.
 	 */
 	struct perf_counter_context *parent_ctx;
 	u64			parent_gen;
 	u64			generation;
 	int			pin_count;
 	struct rcu_head		rcu_head;
 };
 
 /**
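
A sketch of the ancestry test that parent_ctx/parent_gen enable,
modelled on (not quoted from) this era's context_equiv() in
kernel/perf_counter.c: two contexts are interchangeable if they were
cloned from the same parent at the same generation and neither is
pinned:

    /* Sketch: detect two contexts cloned from a common ancestor. */
    static int contexts_equivalent(struct perf_counter_context *a,
                                   struct perf_counter_context *b)
    {
            return a->parent_ctx && a->parent_ctx == b->parent_ctx &&
                   a->parent_gen == b->parent_gen &&
                   !a->pin_count && !b->pin_count;
    }
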
@@ -604,9 +606,9 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
 extern void perf_counter_update_userpage(struct perf_counter *counter);
 
 struct perf_sample_data {
 	struct pt_regs		*regs;
 	u64			addr;
 	u64			period;
 };
 
 extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
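
A sketch of how a PMU driver might fill perf_sample_data at overflow
time; the declaration of perf_counter_overflow() above is cut off at
the hunk boundary, so its third parameter being the sample data is an
assumption:

    /* Sketch: report one overflow from an interrupt handler;
     * regs and period are supplied by the caller.
     */
    struct perf_sample_data data = {
            .regs   = regs,
            .addr   = 0,
            .period = period,
    };
    if (perf_counter_overflow(counter, 1 /* nmi */, &data))
            ; /* nonzero presumably asks us to stop the counter */
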
@@ -636,11 +638,14 @@ extern void perf_counter_fork(struct task_struct *tsk);
 
 extern void perf_counter_task_migration(struct task_struct *task, int cpu);
 
 #define MAX_STACK_DEPTH			255
 
 struct perf_callchain_entry {
-	u16	nr, hv, kernel, user;
+	u16				nr;
+	u16				hv;
+	u16				kernel;
+	u16				user;
 	u64				ip[MAX_STACK_DEPTH];
 };
 
 extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
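
A walking sketch for the reorganized structure; nr counts the valid
ip[] slots, and hv/kernel/user presumably break those frames down by
CPU mode:

    /* Sketch: dump every recorded frame of a callchain. */
    static void dump_callchain(struct perf_callchain_entry *entry)
    {
            u16 i;

            for (i = 0; i < entry->nr; i++)
                    printk(KERN_DEBUG "ip %016llx\n",
                           (unsigned long long)entry->ip[i]);
    }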