author    Robert Richter <robert.richter@amd.com>	2008-07-22 15:08:51 -0400
committer Ingo Molnar <mingo@elte.hu>	2008-07-26 05:48:02 -0400
commit    73185e0a5d11d729d451692034fbe55a9eba7468
tree      470b57909a73ab0af5dc3602eb52b5b9db982b53 /drivers/oprofile
parent    6657fe4f5650ff7174d418d4bfa50c4640e81a2b
drivers/oprofile: coding style fixes in buffer_sync.c
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: oprofile-list <oprofile-list@lists.sourceforge.net>
Cc: Barry Kasindorf <barry.kasindorf@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/oprofile')
-rw-r--r--	drivers/oprofile/buffer_sync.c	111
1 file changed, 57 insertions, 54 deletions
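
For reference, the cleanups below apply two Documentation/CodingStyle rules throughout buffer_sync.c: the pointer '*' binds to the identifier rather than the type, and over-long prototypes are wrapped after the return type (a few hunks also drop stray whitespace, and the final hunk folds a nested "else { if (...) }" into a plain "else if (...)"). A minimal compilable sketch of the before/after shape, using identifiers that appear in the patch but not copied verbatim from the kernel tree:

/* Stubs so this fragment stands alone; the real types live in kernel headers. */
struct notifier_block;
struct task_struct;

/* Old style removed by the patch:
 *   static int task_free_notify(struct notifier_block * self, unsigned long val, void * data)
 *   struct task_struct * task = data;
 */

/* New style introduced by the patch: return type on its own line, '*' next to the name. */
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct task_struct *task = data;

	(void)self; (void)val; (void)task;	/* keep the illustrative stub warning-free */
	return 0;
}
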
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 9304c4555079..69a732778ba7 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -33,7 +33,7 @@
 #include "event_buffer.h"
 #include "cpu_buffer.h"
 #include "buffer_sync.h"
- 
+
 static LIST_HEAD(dying_tasks);
 static LIST_HEAD(dead_tasks);
 static cpumask_t marked_cpus = CPU_MASK_NONE;
@@ -48,10 +48,11 @@ static void process_task_mortuary(void);
  * Can be invoked from softirq via RCU callback due to
  * call_rcu() of the task struct, hence the _irqsave.
  */
-static int task_free_notify(struct notifier_block * self, unsigned long val, void * data)
+static int
+task_free_notify(struct notifier_block *self, unsigned long val, void *data)
 {
 	unsigned long flags;
-	struct task_struct * task = data;
+	struct task_struct *task = data;
 	spin_lock_irqsave(&task_mortuary, flags);
 	list_add(&task->tasks, &dying_tasks);
 	spin_unlock_irqrestore(&task_mortuary, flags);
@@ -62,13 +63,14 @@ static int task_free_notify(struct notifier_block * self, unsigned long val, voi
 /* The task is on its way out. A sync of the buffer means we can catch
  * any remaining samples for this task.
  */
-static int task_exit_notify(struct notifier_block * self, unsigned long val, void * data)
+static int
+task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
 {
 	/* To avoid latency problems, we only process the current CPU,
 	 * hoping that most samples for the task are on this CPU
 	 */
 	sync_buffer(raw_smp_processor_id());
-	return 0; 
+	return 0;
 }
 
 
@@ -77,11 +79,12 @@ static int task_exit_notify(struct notifier_block * self, unsigned long val, voi
  * we don't lose any. This does not have to be exact, it's a QoI issue
  * only.
  */
-static int munmap_notify(struct notifier_block * self, unsigned long val, void * data)
+static int
+munmap_notify(struct notifier_block *self, unsigned long val, void *data)
 {
 	unsigned long addr = (unsigned long)data;
-	struct mm_struct * mm = current->mm;
-	struct vm_area_struct * mpnt;
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *mpnt;
 
 	down_read(&mm->mmap_sem);
 
@@ -99,11 +102,12 @@ static int munmap_notify(struct notifier_block * self, unsigned long val, void *
 	return 0;
 }
 
- 
+
 /* We need to be told about new modules so we don't attribute to a previously
  * loaded module, or drop the samples on the floor.
  */
-static int module_load_notify(struct notifier_block * self, unsigned long val, void * data)
+static int
+module_load_notify(struct notifier_block *self, unsigned long val, void *data)
 {
 #ifdef CONFIG_MODULES
 	if (val != MODULE_STATE_COMING)
@@ -118,7 +122,7 @@ static int module_load_notify(struct notifier_block * self, unsigned long val, v
 	return 0;
 }
 
- 
+
 static struct notifier_block task_free_nb = {
 	.notifier_call = task_free_notify,
 };
@@ -135,7 +139,7 @@ static struct notifier_block module_load_nb = {
 	.notifier_call = module_load_notify,
 };
 
- 
+
 static void end_sync(void)
 {
 	end_cpu_work();
@@ -208,14 +212,14 @@ static inline unsigned long fast_get_dcookie(struct path *path)
  * not strictly necessary but allows oprofile to associate
  * shared-library samples with particular applications
  */
-static unsigned long get_exec_dcookie(struct mm_struct * mm)
+static unsigned long get_exec_dcookie(struct mm_struct *mm)
 {
 	unsigned long cookie = NO_COOKIE;
-	struct vm_area_struct * vma;
+	struct vm_area_struct *vma;
 
 	if (!mm)
 		goto out;
- 
+
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		if (!vma->vm_file)
 			continue;
@@ -235,13 +239,14 @@ out:
  * sure to do this lookup before a mm->mmap modification happens so
  * we don't lose track.
  */
-static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, off_t * offset)
+static unsigned long
+lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
 {
 	unsigned long cookie = NO_COOKIE;
-	struct vm_area_struct * vma;
+	struct vm_area_struct *vma;
 
 	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
- 
+
 		if (addr < vma->vm_start || addr >= vma->vm_end)
 			continue;
 
@@ -265,7 +270,7 @@ static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, o
 
 
 static unsigned long last_cookie = INVALID_COOKIE;
- 
+
 static void add_cpu_switch(int i)
 {
 	add_event_entry(ESCAPE_CODE);
@@ -278,16 +283,16 @@ static void add_kernel_ctx_switch(unsigned int in_kernel)
 {
 	add_event_entry(ESCAPE_CODE);
 	if (in_kernel)
-		add_event_entry(KERNEL_ENTER_SWITCH_CODE); 
+		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
 	else
 		add_event_entry(KERNEL_EXIT_SWITCH_CODE);
 }
 
 static void
-add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
+add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
 {
 	add_event_entry(ESCAPE_CODE);
-	add_event_entry(CTX_SWITCH_CODE); 
+	add_event_entry(CTX_SWITCH_CODE);
 	add_event_entry(task->pid);
 	add_event_entry(cookie);
 	/* Another code for daemon back-compat */
@@ -296,7 +301,7 @@ add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
 	add_event_entry(task->tgid);
 }
 
- 
+
 static void add_cookie_switch(unsigned long cookie)
 {
 	add_event_entry(ESCAPE_CODE);
@@ -304,7 +309,7 @@ static void add_cookie_switch(unsigned long cookie)
 	add_event_entry(cookie);
 }
 
- 
+
 static void add_trace_begin(void)
 {
 	add_event_entry(ESCAPE_CODE);
@@ -319,13 +324,13 @@ static void add_sample_entry(unsigned long offset, unsigned long event)
 }
 
 
-static int add_us_sample(struct mm_struct * mm, struct op_sample * s)
+static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
 {
 	unsigned long cookie;
 	off_t offset;
 
 	cookie = lookup_dcookie(mm, s->eip, &offset);
- 
+
 	if (cookie == INVALID_COOKIE) {
 		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
 		return 0;
@@ -341,13 +346,13 @@ static int add_us_sample(struct mm_struct * mm, struct op_sample * s)
 	return 1;
 }
 
- 
+
 /* Add a sample to the global event buffer. If possible the
  * sample is converted into a persistent dentry/offset pair
  * for later lookup from userspace.
  */
 static int
-add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
+add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
 {
 	if (in_kernel) {
 		add_sample_entry(s->eip, s->event);
@@ -359,9 +364,9 @@ add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
 	}
 	return 0;
 }
- 
 
-static void release_mm(struct mm_struct * mm)
+
+static void release_mm(struct mm_struct *mm)
 {
 	if (!mm)
 		return;
@@ -370,9 +375,9 @@ static void release_mm(struct mm_struct * mm)
 }
 
 
-static struct mm_struct * take_tasks_mm(struct task_struct * task)
+static struct mm_struct *take_tasks_mm(struct task_struct *task)
 {
-	struct mm_struct * mm = get_task_mm(task);
+	struct mm_struct *mm = get_task_mm(task);
 	if (mm)
 		down_read(&mm->mmap_sem);
 	return mm;
@@ -383,10 +388,10 @@ static inline int is_code(unsigned long val)
 {
 	return val == ESCAPE_CODE;
 }
- 
+
 
 /* "acquire" as many cpu buffer slots as we can */
-static unsigned long get_slots(struct oprofile_cpu_buffer * b)
+static unsigned long get_slots(struct oprofile_cpu_buffer *b)
 {
 	unsigned long head = b->head_pos;
 	unsigned long tail = b->tail_pos;
@@ -412,7 +417,7 @@ static unsigned long get_slots(struct oprofile_cpu_buffer * b)
 }
 
 
-static void increment_tail(struct oprofile_cpu_buffer * b)
+static void increment_tail(struct oprofile_cpu_buffer *b)
 {
 	unsigned long new_tail = b->tail_pos + 1;
 
@@ -435,8 +440,8 @@ static void process_task_mortuary(void)
 {
 	unsigned long flags;
 	LIST_HEAD(local_dead_tasks);
-	struct task_struct * task;
-	struct task_struct * ttask;
+	struct task_struct *task;
+	struct task_struct *ttask;
 
 	spin_lock_irqsave(&task_mortuary, flags);
 
@@ -493,7 +498,7 @@ void sync_buffer(int cpu)
 {
 	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
 	struct mm_struct *mm = NULL;
-	struct task_struct * new;
+	struct task_struct *new;
 	unsigned long cookie = 0;
 	int in_kernel = 1;
 	unsigned int i;
@@ -501,7 +506,7 @@ void sync_buffer(int cpu)
 	unsigned long available;
 
 	mutex_lock(&buffer_mutex);
- 
+
 	add_cpu_switch(cpu);
 
 	/* Remember, only we can modify tail_pos */
@@ -509,8 +514,8 @@ void sync_buffer(int cpu)
 	available = get_slots(cpu_buf);
 
 	for (i = 0; i < available; ++i) {
-		struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
- 
+		struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos];
+
 		if (is_code(s->eip)) {
 			if (s->event <= CPU_IS_KERNEL) {
 				/* kernel/userspace switch */
@@ -522,7 +527,7 @@ void sync_buffer(int cpu)
 				state = sb_bt_start;
 				add_trace_begin();
 			} else {
-				struct mm_struct * oldmm = mm;
+				struct mm_struct *oldmm = mm;
 
 				/* userspace context switch */
 				new = (struct task_struct *)s->event;
@@ -533,13 +538,11 @@
 				cookie = get_exec_dcookie(mm);
 				add_user_ctx_switch(new, cookie);
 			}
-		} else {
-			if (state >= sb_bt_start &&
-			    !add_sample(mm, s, in_kernel)) {
-				if (state == sb_bt_start) {
-					state = sb_bt_ignore;
-					atomic_inc(&oprofile_stats.bt_lost_no_mapping);
-				}
+		} else if (state >= sb_bt_start &&
+			   !add_sample(mm, s, in_kernel)) {
+			if (state == sb_bt_start) {
+				state = sb_bt_ignore;
+				atomic_inc(&oprofile_stats.bt_lost_no_mapping);
 			}
 		}
 