about summary refs log tree commit diff stats
path: root/arch/x86/kernel
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2010-10-19 08:15:04 -0400
committerIngo Molnar <mingo@elte.hu>2010-10-22 08:18:25 -0400
commit5ee25c87318fa3722026fd77089fa7ba0db8d447 (patch)
tree37c71a027efe7751d0532a8049dde11b6a0b1b1d /arch/x86/kernel
parentb39f88acd7d989b6b247ba87c480fc24ed71d9c5 (diff)
perf, x86: Extract PEBS/BTS allocation functions
Mostly a cleanup. It reduces code indentation and makes the code flow of reserve_ds_buffers() clearer. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Acked-by: Stephane Eranian <eranian@google.com> LKML-Reference: <20101019134808.253453452@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c90
1 file changed, 56 insertions, 34 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 1bc13518dd59..14d98bd52055 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -74,6 +74,32 @@ static void fini_debug_store_on_cpu(int cpu)
74 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); 74 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
75} 75}
76 76
77static int alloc_pebs_buffer(int cpu)
78{
79 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
80 int max, thresh = 1; /* always use a single PEBS record */
81 void *buffer;
82
83 if (!x86_pmu.pebs)
84 return 0;
85
86 buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL);
87 if (unlikely(!buffer))
88 return -ENOMEM;
89
90 max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
91
92 ds->pebs_buffer_base = (u64)(unsigned long)buffer;
93 ds->pebs_index = ds->pebs_buffer_base;
94 ds->pebs_absolute_maximum = ds->pebs_buffer_base +
95 max * x86_pmu.pebs_record_size;
96
97 ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
98 thresh * x86_pmu.pebs_record_size;
99
100 return 0;
101}
102
77static void release_pebs_buffer(int cpu) 103static void release_pebs_buffer(int cpu)
78{ 104{
79 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; 105 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
@@ -85,6 +111,32 @@ static void release_pebs_buffer(int cpu)
85 ds->pebs_buffer_base = 0; 111 ds->pebs_buffer_base = 0;
86} 112}
87 113
114static int alloc_bts_buffer(int cpu)
115{
116 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
117 int max, thresh;
118 void *buffer;
119
120 if (!x86_pmu.bts)
121 return 0;
122
123 buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
124 if (unlikely(!buffer))
125 return -ENOMEM;
126
127 max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
128 thresh = max / 16;
129
130 ds->bts_buffer_base = (u64)(unsigned long)buffer;
131 ds->bts_index = ds->bts_buffer_base;
132 ds->bts_absolute_maximum = ds->bts_buffer_base +
133 max * BTS_RECORD_SIZE;
134 ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
135 thresh * BTS_RECORD_SIZE;
136
137 return 0;
138}
139
88static void release_bts_buffer(int cpu) 140static void release_bts_buffer(int cpu)
89{ 141{
90 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; 142 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
@@ -133,8 +185,6 @@ static int reserve_ds_buffers(void)
133 185
134 for_each_possible_cpu(cpu) { 186 for_each_possible_cpu(cpu) {
135 struct debug_store *ds; 187 struct debug_store *ds;
136 void *buffer;
137 int max, thresh;
138 188
139 err = -ENOMEM; 189 err = -ENOMEM;
140 ds = kzalloc(sizeof(*ds), GFP_KERNEL); 190 ds = kzalloc(sizeof(*ds), GFP_KERNEL);
@@ -142,39 +192,11 @@ static int reserve_ds_buffers(void)
142 break; 192 break;
143 per_cpu(cpu_hw_events, cpu).ds = ds; 193 per_cpu(cpu_hw_events, cpu).ds = ds;
144 194
145 if (x86_pmu.bts) { 195 if (alloc_bts_buffer(cpu))
146 buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL); 196 break;
147 if (unlikely(!buffer))
148 break;
149
150 max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
151 thresh = max / 16;
152
153 ds->bts_buffer_base = (u64)(unsigned long)buffer;
154 ds->bts_index = ds->bts_buffer_base;
155 ds->bts_absolute_maximum = ds->bts_buffer_base +
156 max * BTS_RECORD_SIZE;
157 ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
158 thresh * BTS_RECORD_SIZE;
159 }
160 197
161 if (x86_pmu.pebs) { 198 if (alloc_pebs_buffer(cpu))
162 buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL); 199 break;
163 if (unlikely(!buffer))
164 break;
165
166 max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
167
168 ds->pebs_buffer_base = (u64)(unsigned long)buffer;
169 ds->pebs_index = ds->pebs_buffer_base;
170 ds->pebs_absolute_maximum = ds->pebs_buffer_base +
171 max * x86_pmu.pebs_record_size;
172 /*
173 * Always use single record PEBS
174 */
175 ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
176 x86_pmu.pebs_record_size;
177 }
178 200
179 err = 0; 201 err = 0;
180 } 202 }