author		Ingo Molnar <mingo@elte.hu>	2008-11-10 03:10:44 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-11-10 03:10:44 -0500
commit		a5a64498c194c82ecad3a2d67cff6231cda8d3dd (patch)
tree		723d5d81419f9960b8d30ed9a2ece8a58d6c4328 /drivers/oprofile/cpu_buffer.c
parent		bb93d802ae5c1949977cc6da247b218240677f11 (diff)
parent		f7160c7573615ec82c691e294cf80d920b5d588d (diff)
Merge commit 'v2.6.28-rc4' into timers/rtc

Conflicts:
	drivers/rtc/rtc-cmos.c
Diffstat (limited to 'drivers/oprofile/cpu_buffer.c')
-rw-r--r--  drivers/oprofile/cpu_buffer.c | 106
1 file changed, 53 insertions(+), 53 deletions(-)
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index e1bd5a937f6c..01d38e78cde1 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -22,7 +22,7 @@
 #include <linux/oprofile.h>
 #include <linux/vmalloc.h>
 #include <linux/errno.h>
- 
+
 #include "event_buffer.h"
 #include "cpu_buffer.h"
 #include "buffer_sync.h"
@@ -38,27 +38,40 @@ static int work_enabled;
 void free_cpu_buffers(void)
 {
 	int i;
 
-	for_each_online_cpu(i) {
+	for_each_possible_cpu(i) {
 		vfree(per_cpu(cpu_buffer, i).buffer);
 		per_cpu(cpu_buffer, i).buffer = NULL;
 	}
 }
 
+unsigned long oprofile_get_cpu_buffer_size(void)
+{
+	return fs_cpu_buffer_size;
+}
+
+void oprofile_cpu_buffer_inc_smpl_lost(void)
+{
+	struct oprofile_cpu_buffer *cpu_buf
+		= &__get_cpu_var(cpu_buffer);
+
+	cpu_buf->sample_lost_overflow++;
+}
+
 int alloc_cpu_buffers(void)
 {
 	int i;
 
 	unsigned long buffer_size = fs_cpu_buffer_size;
 
-	for_each_online_cpu(i) {
+	for_each_possible_cpu(i) {
 		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
 
 		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
 					 cpu_to_node(i));
 		if (!b->buffer)
 			goto fail;
 
 		b->last_task = NULL;
 		b->last_is_kernel = -1;
 		b->tracing = 0;
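The hunk above makes buffer setup and teardown iterate over every possible CPU rather than only the currently online ones, and adds two small accessors (oprofile_get_cpu_buffer_size() and oprofile_cpu_buffer_inc_smpl_lost()) for use outside this file. The iterator change matters for CPU hotplug: a CPU that was online at allocation time may be offline at free time (or vice versa), and only the possible-CPU set guarantees both loops visit the same buffers. A minimal sketch of the pattern, with hypothetical demo_* names that are not part of this driver:

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/percpu.h>
	#include <linux/topology.h>
	#include <linux/vmalloc.h>

	static DEFINE_PER_CPU(void *, demo_buf);

	/* Allocate one node-local buffer per possible CPU. */
	static int demo_alloc(unsigned long size)
	{
		int i;

		for_each_possible_cpu(i) {
			per_cpu(demo_buf, i) = vmalloc_node(size, cpu_to_node(i));
			if (!per_cpu(demo_buf, i))
				return -ENOMEM;	/* caller unwinds via demo_free() */
		}
		return 0;
	}

	/* Matching teardown walks the same possible-CPU set, so a buffer
	 * allocated while its CPU was online is freed even if that CPU
	 * has since been hotplugged out. vfree(NULL) is a safe no-op.
	 */
	static void demo_free(void)
	{
		int i;

		for_each_possible_cpu(i) {
			vfree(per_cpu(demo_buf, i));
			per_cpu(demo_buf, i) = NULL;
		}
	}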
@@ -112,7 +125,7 @@ void end_cpu_work(void)
 }
 
 /* Resets the cpu buffer to a sane state. */
-void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
+void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf)
 {
 	/* reset these to invalid values; the next sample
 	 * collected will populate the buffer with proper
@@ -123,7 +136,7 @@ void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
 }
 
 /* compute number of available slots in cpu_buffer queue */
-static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
+static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
 {
 	unsigned long head = b->head_pos;
 	unsigned long tail = b->tail_pos;
@@ -134,7 +147,7 @@ static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
 	return tail + (b->buffer_size - head) - 1;
 }
 
-static void increment_head(struct oprofile_cpu_buffer * b)
+static void increment_head(struct oprofile_cpu_buffer *b)
 {
 	unsigned long new_head = b->head_pos + 1;
 
@@ -149,17 +162,17 @@ static void increment_head(struct oprofile_cpu_buffer * b)
 }
 
 static inline void
-add_sample(struct oprofile_cpu_buffer * cpu_buf,
+add_sample(struct oprofile_cpu_buffer *cpu_buf,
 	   unsigned long pc, unsigned long event)
 {
-	struct op_sample * entry = &cpu_buf->buffer[cpu_buf->head_pos];
+	struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos];
 	entry->eip = pc;
 	entry->event = event;
 	increment_head(cpu_buf);
 }
 
 static inline void
-add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
+add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
 {
 	add_sample(buffer, ESCAPE_CODE, value);
 }
@@ -173,10 +186,10 @@ add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
  * pc. We tag this in the buffer by generating kernel enter/exit
  * events whenever is_kernel changes
  */
-static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
+static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
 		      int is_kernel, unsigned long event)
 {
-	struct task_struct * task;
+	struct task_struct *task;
 
 	cpu_buf->sample_received++;
 
@@ -205,7 +218,7 @@ static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
 		cpu_buf->last_task = task;
 		add_code(cpu_buf, (unsigned long)task);
 	}
- 
+
 	add_sample(cpu_buf, pc, event);
 	return 1;
 }
@@ -222,7 +235,7 @@ static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
 	return 1;
 }
 
-static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
+static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
 {
 	cpu_buf->tracing = 0;
 }
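The run of one-line hunks above is pure Documentation/CodingStyle cleanup: in a pointer declaration the asterisk binds to the identifier, so "type *name" is the kernel-preferred spelling and "type * name" is what scripts/checkpatch.pl warns about. For illustration only, with a hypothetical demo_sample struct:

	/* Kernel pointer-declaration style: the '*' sits with the name. */
	struct demo_sample {
		unsigned long eip;
		unsigned long event;
	};

	static struct demo_sample *demo_entry(struct demo_sample *buf,
					      unsigned long pos)
	{
		struct demo_sample *entry = &buf[pos];	/* not: struct demo_sample * entry */

		return entry;
	}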
@@ -257,21 +270,23 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
 
 #ifdef CONFIG_OPROFILE_IBS
 
 #define MAX_IBS_SAMPLE_SIZE 14
-static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
-	unsigned long pc, int is_kernel, unsigned int *ibs, int ibs_code)
+
+void oprofile_add_ibs_sample(struct pt_regs *const regs,
+			     unsigned int *const ibs_sample, int ibs_code)
 {
+	int is_kernel = !user_mode(regs);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
 	struct task_struct *task;
 
 	cpu_buf->sample_received++;
 
 	if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
+		/* we can't backtrace since we lost the source of this event */
 		cpu_buf->sample_lost_overflow++;
-		return 0;
+		return;
 	}
 
-	is_kernel = !!is_kernel;
-
 	/* notice a switch from user->kernel or vice versa */
 	if (cpu_buf->last_is_kernel != is_kernel) {
 		cpu_buf->last_is_kernel = is_kernel;
@@ -281,7 +296,6 @@ static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
 	/* notice a task switch */
 	if (!is_kernel) {
 		task = current;
-
 		if (cpu_buf->last_task != task) {
 			cpu_buf->last_task = task;
 			add_code(cpu_buf, (unsigned long)task);
@@ -289,36 +303,17 @@ static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
 	}
 
 	add_code(cpu_buf, ibs_code);
-	add_sample(cpu_buf, ibs[0], ibs[1]);
-	add_sample(cpu_buf, ibs[2], ibs[3]);
-	add_sample(cpu_buf, ibs[4], ibs[5]);
+	add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
+	add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
+	add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
 
 	if (ibs_code == IBS_OP_BEGIN) {
-		add_sample(cpu_buf, ibs[6], ibs[7]);
-		add_sample(cpu_buf, ibs[8], ibs[9]);
-		add_sample(cpu_buf, ibs[10], ibs[11]);
+		add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
+		add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
+		add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
 	}
 
-	return 1;
-}
-
-void oprofile_add_ibs_sample(struct pt_regs *const regs,
-	unsigned int * const ibs_sample, u8 code)
-{
-	int is_kernel = !user_mode(regs);
-	unsigned long pc = profile_pc(regs);
-
-	struct oprofile_cpu_buffer *cpu_buf =
-		&per_cpu(cpu_buffer, smp_processor_id());
-
-	if (!backtrace_depth) {
-		log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code);
-		return;
-	}
-
-	/* if log_sample() fails we can't backtrace since we lost the source
-	 * of this event */
-	if (log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code))
+	if (backtrace_depth)
 		oprofile_ops.backtrace(regs, backtrace_depth);
 }
 
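With the two IBS hunks above, the log_ibs_sample() helper disappears: oprofile_add_ibs_sample() now derives is_kernel from the registers itself, fetches its own per-CPU buffer, returns void instead of a success flag, and the backtrace decision collapses to a plain check of backtrace_depth. Per the hunk, an IBS sample is a flat array of (pc, event) word pairs: three pairs always, three more when the code is IBS_OP_BEGIN. A hedged caller sketch, where read_demo_ibs_regs() is a made-up stand-in for the real MSR reads and the escape-code value is assumed from this era's drivers/oprofile/cpu_buffer.h:

	#include <linux/oprofile.h>
	#include <linux/ptrace.h>

	#define DEMO_IBS_FETCH_BEGIN	3	/* assumed: IBS_FETCH_BEGIN in cpu_buffer.h */

	void read_demo_ibs_regs(unsigned int *regs);	/* hypothetical MSR-read helper */

	/* Sketch of how an arch NMI handler might feed a fetch sample in. */
	static void demo_handle_ibs_fetch(struct pt_regs *regs)
	{
		unsigned int ibs_fetch[6];	/* three (pc, event) pairs, as consumed above */

		read_demo_ibs_regs(ibs_fetch);
		oprofile_add_ibs_sample(regs, ibs_fetch, DEMO_IBS_FETCH_BEGIN);
	}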
@@ -363,11 +358,16 @@ void oprofile_add_trace(unsigned long pc)
  */
 static void wq_sync_buffer(struct work_struct *work)
 {
-	struct oprofile_cpu_buffer * b =
+	struct oprofile_cpu_buffer *b =
 		container_of(work, struct oprofile_cpu_buffer, work.work);
 	if (b->cpu != smp_processor_id()) {
 		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
 		       smp_processor_id(), b->cpu);
+
+		if (!cpu_online(b->cpu)) {
+			cancel_delayed_work(&b->work);
+			return;
+		}
 	}
 	sync_buffer(b->cpu);
 
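The final hunk hardens the self-rearming per-CPU work against CPU hotplug: wq_sync_buffer() normally reschedules itself on its owning CPU, but if that CPU goes offline the work item fires on some other CPU, and before this change it would keep rearming there forever. Now it cancels itself instead. A sketch of the same pattern under those assumptions (demo_* names and the HZ/10 period are hypothetical):

	#include <linux/cpumask.h>
	#include <linux/jiffies.h>
	#include <linux/smp.h>
	#include <linux/workqueue.h>

	struct demo_cpu_state {
		int cpu;			/* CPU this work item belongs to */
		struct delayed_work work;
	};

	static void demo_work_fn(struct work_struct *work)
	{
		struct demo_cpu_state *s =
			container_of(work, struct demo_cpu_state, work.work);

		if (s->cpu != smp_processor_id() && !cpu_online(s->cpu)) {
			/* Owning CPU is gone: stop rearming, as the hunk above does. */
			cancel_delayed_work(&s->work);
			return;
		}

		/* ... flush this CPU's buffer here ... */

		schedule_delayed_work_on(s->cpu, &s->work, HZ / 10);	/* rearm */
	}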