about summary refs log tree commit diff stats
path: root/kernel/trace
diff options
context:
space:
mode:
author: Oleg Nesterov <oleg@redhat.com> 2013-03-29 13:26:51 -0400
committer: Oleg Nesterov <oleg@redhat.com> 2013-04-13 09:32:01 -0400
commit: 457d1772f1c1bcf37b2ae7fc8f1d6f303d1d5cf9 (patch)
tree: 49da165c316ec7086272994ede7ac96e334d6850 /kernel/trace
parent: 0e3853d202e8b2720bc4c674dc58849b2662c8f8 (diff)
uprobes/tracing: Generalize struct uprobe_trace_entry_head
struct uprobe_trace_entry_head has a single member for reporting, "unsigned long ip". If we want to support uretprobes we need to create another struct which has "func" and "ret_ip" and duplicate a lot of functions, like trace_kprobe.c does. To avoid this copy-and-paste horror we turn ->ip into ->vaddr[] and add couple of trivial helpers to calculate sizeof/data. This uglifies the code a bit, but this allows us to avoid a lot more complications later, when we add the support for ret-probes. Signed-off-by: Oleg Nesterov <oleg@redhat.com> Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com> Tested-by: Anton Arapov <anton@redhat.com>
Diffstat (limited to 'kernel/trace')
-rw-r--r--kernel/trace/trace.h5
-rw-r--r--kernel/trace/trace_uprobe.c62
2 files changed, 36 insertions, 31 deletions
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 2081971367ea..8bed1dfcb938 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -103,11 +103,6 @@ struct kretprobe_trace_entry_head {
103 unsigned long ret_ip; 103 unsigned long ret_ip;
104}; 104};
105 105
106struct uprobe_trace_entry_head {
107 struct trace_entry ent;
108 unsigned long ip;
109};
110
111/* 106/*
112 * trace_flag_type is an enumeration that holds different 107 * trace_flag_type is an enumeration that holds different
113 * states when a trace occurs. These are: 108 * states when a trace occurs. These are:
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 43d258db998e..49b400368bfd 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -28,6 +28,18 @@
28 28
29#define UPROBE_EVENT_SYSTEM "uprobes" 29#define UPROBE_EVENT_SYSTEM "uprobes"
30 30
31struct uprobe_trace_entry_head {
32 struct trace_entry ent;
33 unsigned long vaddr[];
34};
35
36#define SIZEOF_TRACE_ENTRY(is_return) \
37 (sizeof(struct uprobe_trace_entry_head) + \
38 sizeof(unsigned long) * (is_return ? 2 : 1))
39
40#define DATAOF_TRACE_ENTRY(entry, is_return) \
41 ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
42
31struct trace_uprobe_filter { 43struct trace_uprobe_filter {
32 rwlock_t rwlock; 44 rwlock_t rwlock;
33 int nr_systemwide; 45 int nr_systemwide;
@@ -491,20 +503,19 @@ static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
491 struct uprobe_trace_entry_head *entry; 503 struct uprobe_trace_entry_head *entry;
492 struct ring_buffer_event *event; 504 struct ring_buffer_event *event;
493 struct ring_buffer *buffer; 505 struct ring_buffer *buffer;
494 u8 *data; 506 void *data;
495 int size, i; 507 int size, i;
496 struct ftrace_event_call *call = &tu->call; 508 struct ftrace_event_call *call = &tu->call;
497 509
498 size = sizeof(*entry) + tu->size; 510 size = SIZEOF_TRACE_ENTRY(false) + tu->size;
499
500 event = trace_current_buffer_lock_reserve(&buffer, call->event.type, 511 event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
501 size, 0, 0); 512 size, 0, 0);
502 if (!event) 513 if (!event)
503 return 0; 514 return 0;
504 515
505 entry = ring_buffer_event_data(event); 516 entry = ring_buffer_event_data(event);
506 entry->ip = instruction_pointer(regs); 517 entry->vaddr[0] = instruction_pointer(regs);
507 data = (u8 *)&entry[1]; 518 data = DATAOF_TRACE_ENTRY(entry, false);
508 for (i = 0; i < tu->nr_args; i++) 519 for (i = 0; i < tu->nr_args; i++)
509 call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset); 520 call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
510 521
@@ -518,22 +529,22 @@ static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
518static enum print_line_t 529static enum print_line_t
519print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event) 530print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
520{ 531{
521 struct uprobe_trace_entry_head *field; 532 struct uprobe_trace_entry_head *entry;
522 struct trace_seq *s = &iter->seq; 533 struct trace_seq *s = &iter->seq;
523 struct trace_uprobe *tu; 534 struct trace_uprobe *tu;
524 u8 *data; 535 u8 *data;
525 int i; 536 int i;
526 537
527 field = (struct uprobe_trace_entry_head *)iter->ent; 538 entry = (struct uprobe_trace_entry_head *)iter->ent;
528 tu = container_of(event, struct trace_uprobe, call.event); 539 tu = container_of(event, struct trace_uprobe, call.event);
529 540
530 if (!trace_seq_printf(s, "%s: (0x%lx)", tu->call.name, field->ip)) 541 if (!trace_seq_printf(s, "%s: (0x%lx)", tu->call.name, entry->vaddr[0]))
531 goto partial; 542 goto partial;
532 543
533 data = (u8 *)&field[1]; 544 data = DATAOF_TRACE_ENTRY(entry, false);
534 for (i = 0; i < tu->nr_args; i++) { 545 for (i = 0; i < tu->nr_args; i++) {
535 if (!tu->args[i].type->print(s, tu->args[i].name, 546 if (!tu->args[i].type->print(s, tu->args[i].name,
536 data + tu->args[i].offset, field)) 547 data + tu->args[i].offset, entry))
537 goto partial; 548 goto partial;
538 } 549 }
539 550
@@ -585,16 +596,17 @@ static void probe_event_disable(struct trace_uprobe *tu, int flag)
585 596
586static int uprobe_event_define_fields(struct ftrace_event_call *event_call) 597static int uprobe_event_define_fields(struct ftrace_event_call *event_call)
587{ 598{
588 int ret, i; 599 int ret, i, size;
589 struct uprobe_trace_entry_head field; 600 struct uprobe_trace_entry_head field;
590 struct trace_uprobe *tu = (struct trace_uprobe *)event_call->data; 601 struct trace_uprobe *tu = event_call->data;
591 602
592 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); 603 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
604 size = SIZEOF_TRACE_ENTRY(false);
593 /* Set argument names as fields */ 605 /* Set argument names as fields */
594 for (i = 0; i < tu->nr_args; i++) { 606 for (i = 0; i < tu->nr_args; i++) {
595 ret = trace_define_field(event_call, tu->args[i].type->fmttype, 607 ret = trace_define_field(event_call, tu->args[i].type->fmttype,
596 tu->args[i].name, 608 tu->args[i].name,
597 sizeof(field) + tu->args[i].offset, 609 size + tu->args[i].offset,
598 tu->args[i].type->size, 610 tu->args[i].type->size,
599 tu->args[i].type->is_signed, 611 tu->args[i].type->is_signed,
600 FILTER_OTHER); 612 FILTER_OTHER);
@@ -748,33 +760,31 @@ static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
748 struct ftrace_event_call *call = &tu->call; 760 struct ftrace_event_call *call = &tu->call;
749 struct uprobe_trace_entry_head *entry; 761 struct uprobe_trace_entry_head *entry;
750 struct hlist_head *head; 762 struct hlist_head *head;
751 u8 *data; 763 unsigned long ip;
752 int size, __size, i; 764 void *data;
753 int rctx; 765 int size, rctx, i;
754 766
755 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm)) 767 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
756 return UPROBE_HANDLER_REMOVE; 768 return UPROBE_HANDLER_REMOVE;
757 769
758 __size = sizeof(*entry) + tu->size; 770 size = SIZEOF_TRACE_ENTRY(false);
759 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 771 size = ALIGN(size + tu->size + sizeof(u32), sizeof(u64)) - sizeof(u32);
760 size -= sizeof(u32);
761 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) 772 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
762 return 0; 773 return 0;
763 774
764 preempt_disable(); 775 preempt_disable();
765
766 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); 776 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
767 if (!entry) 777 if (!entry)
768 goto out; 778 goto out;
769 779
770 entry->ip = instruction_pointer(regs); 780 ip = instruction_pointer(regs);
771 data = (u8 *)&entry[1]; 781 entry->vaddr[0] = ip;
782 data = DATAOF_TRACE_ENTRY(entry, false);
772 for (i = 0; i < tu->nr_args; i++) 783 for (i = 0; i < tu->nr_args; i++)
773 call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset); 784 call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
774 785
775 head = this_cpu_ptr(call->perf_events); 786 head = this_cpu_ptr(call->perf_events);
776 perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head, NULL); 787 perf_trace_buf_submit(entry, size, rctx, ip, 1, regs, head, NULL);
777
778 out: 788 out:
779 preempt_enable(); 789 preempt_enable();
780 return 0; 790 return 0;
@@ -784,7 +794,7 @@ static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
784static 794static
785int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data) 795int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data)
786{ 796{
787 struct trace_uprobe *tu = (struct trace_uprobe *)event->data; 797 struct trace_uprobe *tu = event->data;
788 798
789 switch (type) { 799 switch (type) {
790 case TRACE_REG_REGISTER: 800 case TRACE_REG_REGISTER: