Diffstat (limited to 'drivers/oprofile/cpu_buffer.c')
-rw-r--r--  drivers/oprofile/cpu_buffer.c | 197
1 file changed, 99 insertions(+), 98 deletions(-)
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 01d38e78cde1..61090969158f 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -28,6 +28,25 @@
 #include "buffer_sync.h"
 #include "oprof.h"
 
+#define OP_BUFFER_FLAGS 0
+
+/*
+ * Read and write access is using spin locking. Thus, writing to the
+ * buffer by NMI handler (x86) could occur also during critical
+ * sections when reading the buffer. To avoid this, there are 2
+ * buffers for independent read and write access. Read access is in
+ * process context only, write access only in the NMI handler. If the
+ * read buffer runs empty, both buffers are swapped atomically. There
+ * is potentially a small window during swapping where the buffers are
+ * disabled and samples could be lost.
+ *
+ * Using 2 buffers is a little bit overhead, but the solution is clear
+ * and does not require changes in the ring buffer implementation. It
+ * can be changed to a single buffer solution when the ring buffer
+ * access is implemented as non-locking atomic code.
+ */
+struct ring_buffer *op_ring_buffer_read;
+struct ring_buffer *op_ring_buffer_write;
 DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
 static void wq_sync_buffer(struct work_struct *work);
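
The comment introduced above describes the central design choice: all NMI-context writes go to op_ring_buffer_write, all process-context reads come from op_ring_buffer_read, and the two are exchanged when the read side runs dry. The swap itself is not part of this file; the sketch below only illustrates the idea, and the helper name and surrounding details are assumptions, not code from this patch.

    /* Illustration only: the pointer exchange behind the comment above.
     * In the real series the swap happens on the read (process context)
     * side while sampling is briefly disabled, which is the "small
     * window" where samples can be lost. */
    static void op_swap_ring_buffers(void)          /* hypothetical name */
    {
            struct ring_buffer *tmp = op_ring_buffer_read;

            op_ring_buffer_read  = op_ring_buffer_write;
            op_ring_buffer_write = tmp;
    }
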
@@ -37,12 +56,12 @@ static int work_enabled;
 
 void free_cpu_buffers(void)
 {
-        int i;
-
-        for_each_possible_cpu(i) {
-                vfree(per_cpu(cpu_buffer, i).buffer);
-                per_cpu(cpu_buffer, i).buffer = NULL;
-        }
+        if (op_ring_buffer_read)
+                ring_buffer_free(op_ring_buffer_read);
+        op_ring_buffer_read = NULL;
+        if (op_ring_buffer_write)
+                ring_buffer_free(op_ring_buffer_write);
+        op_ring_buffer_write = NULL;
 }
 
 unsigned long oprofile_get_cpu_buffer_size(void)
@@ -64,14 +83,16 @@ int alloc_cpu_buffers(void)
 
         unsigned long buffer_size = fs_cpu_buffer_size;
 
+        op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
+        if (!op_ring_buffer_read)
+                goto fail;
+        op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
+        if (!op_ring_buffer_write)
+                goto fail;
+
         for_each_possible_cpu(i) {
                 struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
 
-                b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
-                        cpu_to_node(i));
-                if (!b->buffer)
-                        goto fail;
-
                 b->last_task = NULL;
                 b->last_is_kernel = -1;
                 b->tracing = 0;
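
With the per-CPU vmalloc_node() buffers gone, error handling in alloc_cpu_buffers() becomes simpler: free_cpu_buffers() above checks each global pointer and resets it to NULL, so it can unwind a partially completed allocation. The fail: label itself is outside the hunks shown here; the sketch below assumes it simply falls back to free_cpu_buffers() and returns -ENOMEM.

    /* Sketch of how the new allocation and tear-down pair up; the body
     * of the fail: label is assumed, it is not shown in the diff above. */
    static int alloc_op_ring_buffers(unsigned long buffer_size)
    {
            op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
            if (!op_ring_buffer_read)
                    goto fail;
            op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
            if (!op_ring_buffer_write)
                    goto fail;
            return 0;

    fail:
            free_cpu_buffers();     /* NULL-safe, frees whatever was set up */
            return -ENOMEM;
    }
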
@@ -124,57 +145,31 @@ void end_cpu_work(void)
         flush_scheduled_work();
 }
 
-/* Resets the cpu buffer to a sane state. */
-void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf)
-{
-        /* reset these to invalid values; the next sample
-         * collected will populate the buffer with proper
-         * values to initialize the buffer
-         */
-        cpu_buf->last_is_kernel = -1;
-        cpu_buf->last_task = NULL;
-}
-
-/* compute number of available slots in cpu_buffer queue */
-static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
+static inline int
+add_sample(struct oprofile_cpu_buffer *cpu_buf,
+           unsigned long pc, unsigned long event)
 {
-        unsigned long head = b->head_pos;
-        unsigned long tail = b->tail_pos;
+        struct op_entry entry;
+        int ret;
 
-        if (tail > head)
-                return (tail - head) - 1;
+        ret = cpu_buffer_write_entry(&entry);
+        if (ret)
+                return ret;
 
-        return tail + (b->buffer_size - head) - 1;
-}
+        entry.sample->eip = pc;
+        entry.sample->event = event;
 
-static void increment_head(struct oprofile_cpu_buffer *b)
-{
-        unsigned long new_head = b->head_pos + 1;
-
-        /* Ensure anything written to the slot before we
-         * increment is visible */
-        wmb();
-
-        if (new_head < b->buffer_size)
-                b->head_pos = new_head;
-        else
-                b->head_pos = 0;
-}
+        ret = cpu_buffer_write_commit(&entry);
+        if (ret)
+                return ret;
 
-static inline void
-add_sample(struct oprofile_cpu_buffer *cpu_buf,
-           unsigned long pc, unsigned long event)
-{
-        struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos];
-        entry->eip = pc;
-        entry->event = event;
-        increment_head(cpu_buf);
+        return 0;
 }
 
-static inline void
+static inline int
 add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
 {
-        add_sample(buffer, ESCAPE_CODE, value);
+        return add_sample(buffer, ESCAPE_CODE, value);
 }
 
 /* This must be safe from any context. It's safe writing here
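
The rewritten add_sample() relies on cpu_buffer_write_entry() and cpu_buffer_write_commit(), which are not defined in this file (the diffstat is limited to cpu_buffer.c, so their definitions are not shown here). A rough sketch of how such helpers can sit on top of the generic ring buffer follows; the struct layout and the ring_buffer_lock_reserve()/ring_buffer_unlock_commit() signatures have changed between kernel releases, so treat every detail as an assumption rather than the patch's actual implementation.

    /* Illustration only: one plausible shape for the reserve/commit
     * helpers used by add_sample() above.  Names, fields and ring-buffer
     * signatures are assumptions; they are not copied from the patch. */
    struct op_entry_sketch {
            struct ring_buffer_event *event;
            struct op_sample *sample;
            unsigned long irq_flags;
    };

    static int write_entry_sketch(struct op_entry_sketch *entry)
    {
            entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
                                                    sizeof(struct op_sample),
                                                    &entry->irq_flags);
            if (!entry->event)
                    return -ENOMEM;
            entry->sample = ring_buffer_event_data(entry->event);
            return 0;
    }

    static int write_commit_sketch(struct op_entry_sketch *entry)
    {
            return ring_buffer_unlock_commit(op_ring_buffer_write,
                                             entry->event, entry->irq_flags);
    }
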
@@ -198,11 +193,6 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
                 return 0;
         }
 
-        if (nr_available_slots(cpu_buf) < 3) {
-                cpu_buf->sample_lost_overflow++;
-                return 0;
-        }
-
         is_kernel = !!is_kernel;
 
         task = current;
@@ -210,26 +200,29 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
         /* notice a switch from user->kernel or vice versa */
         if (cpu_buf->last_is_kernel != is_kernel) {
                 cpu_buf->last_is_kernel = is_kernel;
-                add_code(cpu_buf, is_kernel);
+                if (add_code(cpu_buf, is_kernel))
+                        goto fail;
         }
 
         /* notice a task switch */
         if (cpu_buf->last_task != task) {
                 cpu_buf->last_task = task;
-                add_code(cpu_buf, (unsigned long)task);
+                if (add_code(cpu_buf, (unsigned long)task))
+                        goto fail;
         }
 
-        add_sample(cpu_buf, pc, event);
+        if (add_sample(cpu_buf, pc, event))
+                goto fail;
+
         return 1;
+
+fail:
+        cpu_buf->sample_lost_overflow++;
+        return 0;
 }
 
 static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
 {
-        if (nr_available_slots(cpu_buf) < 4) {
-                cpu_buf->sample_lost_overflow++;
-                return 0;
-        }
-
         add_code(cpu_buf, CPU_TRACE_BEGIN);
         cpu_buf->tracing = 1;
         return 1;
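
One detail worth noting in the hunk above: log_sample() now funnels every failed write into a single fail: path that bumps sample_lost_overflow, while oprofile_begin_trace() keeps calling add_code() without looking at its new return value. A checked variant would look roughly like the sketch below; it is not part of this patch.

    /* Not from the patch: oprofile_begin_trace() with the CPU_TRACE_BEGIN
     * escape code checked the same way log_sample() now checks its writes. */
    static int oprofile_begin_trace_checked(struct oprofile_cpu_buffer *cpu_buf)
    {
            if (add_code(cpu_buf, CPU_TRACE_BEGIN)) {
                    cpu_buf->sample_lost_overflow++;
                    return 0;
            }
            cpu_buf->tracing = 1;
            return 1;
    }
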
@@ -253,8 +246,10 @@ void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
         if (!oprofile_begin_trace(cpu_buf))
                 return;
 
-        /* if log_sample() fail we can't backtrace since we lost the source
-         * of this event */
+        /*
+         * if log_sample() fail we can't backtrace since we lost the
+         * source of this event
+         */
         if (log_sample(cpu_buf, pc, is_kernel, event))
                 oprofile_ops.backtrace(regs, backtrace_depth);
         oprofile_end_trace(cpu_buf);
@@ -272,49 +267,55 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
 
 #define MAX_IBS_SAMPLE_SIZE 14
 
-void oprofile_add_ibs_sample(struct pt_regs *const regs,
-                             unsigned int *const ibs_sample, int ibs_code)
+void oprofile_add_ibs_sample(struct pt_regs * const regs,
+                             unsigned int * const ibs_sample, int ibs_code)
 {
         int is_kernel = !user_mode(regs);
         struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
         struct task_struct *task;
+        int fail = 0;
 
         cpu_buf->sample_received++;
 
-        if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
-                /* we can't backtrace since we lost the source of this event */
-                cpu_buf->sample_lost_overflow++;
-                return;
-        }
-
         /* notice a switch from user->kernel or vice versa */
         if (cpu_buf->last_is_kernel != is_kernel) {
+                if (add_code(cpu_buf, is_kernel))
+                        goto fail;
                 cpu_buf->last_is_kernel = is_kernel;
-                add_code(cpu_buf, is_kernel);
         }
 
         /* notice a task switch */
         if (!is_kernel) {
                 task = current;
                 if (cpu_buf->last_task != task) {
+                        if (add_code(cpu_buf, (unsigned long)task))
+                                goto fail;
                         cpu_buf->last_task = task;
-                        add_code(cpu_buf, (unsigned long)task);
                 }
         }
 
-        add_code(cpu_buf, ibs_code);
-        add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
-        add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
-        add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
+        fail = fail || add_code(cpu_buf, ibs_code);
+        fail = fail || add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
+        fail = fail || add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
+        fail = fail || add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
 
         if (ibs_code == IBS_OP_BEGIN) {
-                add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
-                add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
-                add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
+                fail = fail || add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
+                fail = fail || add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
+                fail = fail || add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
         }
 
+        if (fail)
+                goto fail;
+
         if (backtrace_depth)
                 oprofile_ops.backtrace(regs, backtrace_depth);
+
+        return;
+
+fail:
+        cpu_buf->sample_lost_overflow++;
+        return;
 }
 
 #endif
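
The fail = fail || add_sample(...) chains above use C's short-circuit evaluation deliberately: as soon as one write fails, every following || operand is skipped, so no further ring-buffer space is consumed for an IBS record that will be discarded anyway. A small stand-alone illustration of that behaviour:

    /* Userspace illustration of the short-circuit pattern above; it has
     * nothing to do with the kernel build. */
    #include <stdio.h>

    static int try_write(int n)
    {
            printf("write %d\n", n);
            return n == 2;          /* pretend the second write fails */
    }

    int main(void)
    {
            int fail = 0;

            fail = fail || try_write(1);
            fail = fail || try_write(2);
            fail = fail || try_write(3);    /* skipped: fail is already set */

            return fail;            /* prints "write 1", "write 2"; exits 1 */
    }
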
@@ -332,21 +333,21 @@ void oprofile_add_trace(unsigned long pc)
         if (!cpu_buf->tracing)
                 return;
 
-        if (nr_available_slots(cpu_buf) < 1) {
-                cpu_buf->tracing = 0;
-                cpu_buf->sample_lost_overflow++;
-                return;
-        }
+        /*
+         * broken frame can give an eip with the same value as an
+         * escape code, abort the trace if we get it
+         */
+        if (pc == ESCAPE_CODE)
+                goto fail;
 
-        /* broken frame can give an eip with the same value as an escape code,
-         * abort the trace if we get it */
-        if (pc == ESCAPE_CODE) {
-                cpu_buf->tracing = 0;
-                cpu_buf->backtrace_aborted++;
-                return;
-        }
+        if (add_sample(cpu_buf, pc, 0))
+                goto fail;
 
-        add_sample(cpu_buf, pc, 0);
+        return;
+fail:
+        cpu_buf->tracing = 0;
+        cpu_buf->backtrace_aborted++;
+        return;
 }
 
 /*