about summary refs log tree commit diff stats
path: root/kernel/trace/trace.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2008-05-12 15:20:45 -0400
committerThomas Gleixner <tglx@linutronix.de>2008-05-23 14:40:46 -0400
commitc7aafc549766b87819285d3480648fc652a47bc4 (patch)
tree4c36170a644633f930feca57b27f8c5d23644e70 /kernel/trace/trace.c
parent60a11774b38fef1ab90b18c5353bd1c7c4d311c8 (diff)
ftrace: cleanups
factor out code and clean it up. Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--kernel/trace/trace.c144
1 files changed, 91 insertions, 53 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f6d026f17dbb..61d2f0228866 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -142,12 +142,59 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
142 tracing_record_cmdline(current); 142 tracing_record_cmdline(current);
143} 143}
144 144
145void check_pages(struct trace_array_cpu *data)
146{
147 struct page *page, *tmp;
148
149 BUG_ON(data->trace_pages.next->prev != &data->trace_pages);
150 BUG_ON(data->trace_pages.prev->next != &data->trace_pages);
151
152 list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
153 BUG_ON(page->lru.next->prev != &page->lru);
154 BUG_ON(page->lru.prev->next != &page->lru);
155 }
156}
157
158void *head_page(struct trace_array_cpu *data)
159{
160 struct page *page;
161
162 check_pages(data);
163 if (list_empty(&data->trace_pages))
164 return NULL;
165
166 page = list_entry(data->trace_pages.next, struct page, lru);
167 BUG_ON(&page->lru == &data->trace_pages);
168
169 return page_address(page);
170}
171
172notrace static void
173flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
174{
175 struct list_head flip_pages;
176
177 INIT_LIST_HEAD(&flip_pages);
178
179 tr1->trace_current = NULL;
180 memcpy(&tr1->trace_current_idx, &tr2->trace_current_idx,
181 sizeof(struct trace_array_cpu) -
182 offsetof(struct trace_array_cpu, trace_current_idx));
183
184 check_pages(tr1);
185 check_pages(tr2);
186 list_splice_init(&tr1->trace_pages, &flip_pages);
187 list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
188 list_splice_init(&flip_pages, &tr2->trace_pages);
189 BUG_ON(!list_empty(&flip_pages));
190 check_pages(tr1);
191 check_pages(tr2);
192}
193
145notrace void 194notrace void
146update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) 195update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
147{ 196{
148 struct trace_array_cpu *data; 197 struct trace_array_cpu *data;
149 void *save_trace;
150 struct list_head save_pages;
151 int i; 198 int i;
152 199
153 WARN_ON_ONCE(!irqs_disabled()); 200 WARN_ON_ONCE(!irqs_disabled());
@@ -155,11 +202,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
155 /* clear out all the previous traces */ 202 /* clear out all the previous traces */
156 for_each_possible_cpu(i) { 203 for_each_possible_cpu(i) {
157 data = tr->data[i]; 204 data = tr->data[i];
158 save_trace = max_tr.data[i]->trace; 205 flip_trace(max_tr.data[i], data);
159 save_pages = max_tr.data[i]->trace_pages;
160 memcpy(max_tr.data[i], data, sizeof(*data));
161 data->trace = save_trace;
162 data->trace_pages = save_pages;
163 tracing_reset(data); 206 tracing_reset(data);
164 } 207 }
165 208
@@ -177,8 +220,6 @@ notrace void
177update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) 220update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
178{ 221{
179 struct trace_array_cpu *data = tr->data[cpu]; 222 struct trace_array_cpu *data = tr->data[cpu];
180 void *save_trace;
181 struct list_head save_pages;
182 int i; 223 int i;
183 224
184 WARN_ON_ONCE(!irqs_disabled()); 225 WARN_ON_ONCE(!irqs_disabled());
@@ -186,11 +227,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
186 for_each_possible_cpu(i) 227 for_each_possible_cpu(i)
187 tracing_reset(max_tr.data[i]); 228 tracing_reset(max_tr.data[i]);
188 229
189 save_trace = max_tr.data[cpu]->trace; 230 flip_trace(max_tr.data[cpu], data);
190 save_pages = max_tr.data[cpu]->trace_pages; 231
191 memcpy(max_tr.data[cpu], data, sizeof(*data));
192 data->trace = save_trace;
193 data->trace_pages = save_pages;
194 tracing_reset(data); 232 tracing_reset(data);
195 233
196 __update_max_tr(tr, tsk, cpu); 234 __update_max_tr(tr, tsk, cpu);
@@ -234,9 +272,9 @@ int register_tracer(struct tracer *type)
234 * If we fail, we do not register this tracer. 272 * If we fail, we do not register this tracer.
235 */ 273 */
236 for_each_possible_cpu(i) { 274 for_each_possible_cpu(i) {
237 if (!data->trace)
238 continue;
239 data = tr->data[i]; 275 data = tr->data[i];
276 if (!head_page(data))
277 continue;
240 tracing_reset(data); 278 tracing_reset(data);
241 } 279 }
242 current_trace = type; 280 current_trace = type;
@@ -298,7 +336,7 @@ void unregister_tracer(struct tracer *type)
298void notrace tracing_reset(struct trace_array_cpu *data) 336void notrace tracing_reset(struct trace_array_cpu *data)
299{ 337{
300 data->trace_idx = 0; 338 data->trace_idx = 0;
301 data->trace_current = data->trace; 339 data->trace_current = head_page(data);
302 data->trace_current_idx = 0; 340 data->trace_current_idx = 0;
303} 341}
304 342
@@ -425,26 +463,31 @@ notrace void tracing_record_cmdline(struct task_struct *tsk)
425} 463}
426 464
427static inline notrace struct trace_entry * 465static inline notrace struct trace_entry *
428tracing_get_trace_entry(struct trace_array *tr, 466tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
429 struct trace_array_cpu *data)
430{ 467{
431 unsigned long idx, idx_next; 468 unsigned long idx, idx_next;
432 struct trace_entry *entry; 469 struct trace_entry *entry;
433 struct page *page;
434 struct list_head *next; 470 struct list_head *next;
471 struct page *page;
435 472
436 data->trace_idx++; 473 data->trace_idx++;
437 idx = data->trace_current_idx; 474 idx = data->trace_current_idx;
438 idx_next = idx + 1; 475 idx_next = idx + 1;
439 476
477 BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);
478
440 entry = data->trace_current + idx * TRACE_ENTRY_SIZE; 479 entry = data->trace_current + idx * TRACE_ENTRY_SIZE;
441 480
442 if (unlikely(idx_next >= ENTRIES_PER_PAGE)) { 481 if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
443 page = virt_to_page(data->trace_current); 482 page = virt_to_page(data->trace_current);
444 if (unlikely(&page->lru == data->trace_pages.prev)) 483 /*
445 next = data->trace_pages.next; 484 * Roundrobin - but skip the head (which is not a real page):
446 else 485 */
447 next = page->lru.next; 486 next = page->lru.next;
487 if (unlikely(next == &data->trace_pages))
488 next = next->next;
489 BUG_ON(next == &data->trace_pages);
490
448 page = list_entry(next, struct page, lru); 491 page = list_entry(next, struct page, lru);
449 data->trace_current = page_address(page); 492 data->trace_current = page_address(page);
450 idx_next = 0; 493 idx_next = 0;
@@ -456,18 +499,17 @@ tracing_get_trace_entry(struct trace_array *tr,
456} 499}
457 500
458static inline notrace void 501static inline notrace void
459tracing_generic_entry_update(struct trace_entry *entry, 502tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
460 unsigned long flags)
461{ 503{
462 struct task_struct *tsk = current; 504 struct task_struct *tsk = current;
463 unsigned long pc; 505 unsigned long pc;
464 506
465 pc = preempt_count(); 507 pc = preempt_count();
466 508
467 entry->idx = atomic_inc_return(&tracer_counter); 509 entry->idx = atomic_inc_return(&tracer_counter);
468 entry->preempt_count = pc & 0xff; 510 entry->preempt_count = pc & 0xff;
469 entry->pid = tsk->pid; 511 entry->pid = tsk->pid;
470 entry->t = now(raw_smp_processor_id()); 512 entry->t = now(raw_smp_processor_id());
471 entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | 513 entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
472 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | 514 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
473 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | 515 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
@@ -476,16 +518,15 @@ tracing_generic_entry_update(struct trace_entry *entry,
476 518
477notrace void 519notrace void
478ftrace(struct trace_array *tr, struct trace_array_cpu *data, 520ftrace(struct trace_array *tr, struct trace_array_cpu *data,
479 unsigned long ip, unsigned long parent_ip, 521 unsigned long ip, unsigned long parent_ip, unsigned long flags)
480 unsigned long flags)
481{ 522{
482 struct trace_entry *entry; 523 struct trace_entry *entry;
483 524
484 entry = tracing_get_trace_entry(tr, data); 525 entry = tracing_get_trace_entry(tr, data);
485 tracing_generic_entry_update(entry, flags); 526 tracing_generic_entry_update(entry, flags);
486 entry->type = TRACE_FN; 527 entry->type = TRACE_FN;
487 entry->fn.ip = ip; 528 entry->fn.ip = ip;
488 entry->fn.parent_ip = parent_ip; 529 entry->fn.parent_ip = parent_ip;
489} 530}
490 531
491notrace void 532notrace void
@@ -496,7 +537,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
496{ 537{
497 struct trace_entry *entry; 538 struct trace_entry *entry;
498 539
499 entry = tracing_get_trace_entry(tr, data); 540 entry = tracing_get_trace_entry(tr, data);
500 tracing_generic_entry_update(entry, flags); 541 tracing_generic_entry_update(entry, flags);
501 entry->type = TRACE_CTX; 542 entry->type = TRACE_CTX;
502 entry->ctx.prev_pid = prev->pid; 543 entry->ctx.prev_pid = prev->pid;
@@ -540,6 +581,8 @@ trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
540 } 581 }
541 582
542 page = list_entry(iter->next_page[cpu], struct page, lru); 583 page = list_entry(iter->next_page[cpu], struct page, lru);
584 BUG_ON(&data->trace_pages == &page->lru);
585
543 array = page_address(page); 586 array = page_address(page);
544 587
545 return &array[iter->next_page_idx[cpu]]; 588 return &array[iter->next_page_idx[cpu]];
@@ -554,7 +597,7 @@ find_next_entry(struct trace_iterator *iter, int *ent_cpu)
554 int cpu; 597 int cpu;
555 598
556 for_each_possible_cpu(cpu) { 599 for_each_possible_cpu(cpu) {
557 if (!tr->data[cpu]->trace) 600 if (!head_page(tr->data[cpu]))
558 continue; 601 continue;
559 ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu); 602 ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
560 if (ent && 603 if (ent &&
@@ -762,7 +805,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
762 name = type->name; 805 name = type->name;
763 806
764 for_each_possible_cpu(cpu) { 807 for_each_possible_cpu(cpu) {
765 if (tr->data[cpu]->trace) { 808 if (head_page(tr->data[cpu])) {
766 total += tr->data[cpu]->trace_idx; 809 total += tr->data[cpu]->trace_idx;
767 if (tr->data[cpu]->trace_idx > tr->entries) 810 if (tr->data[cpu]->trace_idx > tr->entries)
768 entries += tr->entries; 811 entries += tr->entries;
@@ -975,8 +1018,7 @@ static int trace_empty(struct trace_iterator *iter)
975 for_each_possible_cpu(cpu) { 1018 for_each_possible_cpu(cpu) {
976 data = iter->tr->data[cpu]; 1019 data = iter->tr->data[cpu];
977 1020
978 if (data->trace && 1021 if (head_page(data) && data->trace_idx)
979 data->trace_idx)
980 return 0; 1022 return 0;
981 } 1023 }
982 return 1; 1024 return 1;
@@ -1576,9 +1618,9 @@ static struct tracer no_tracer __read_mostly =
1576static int trace_alloc_page(void) 1618static int trace_alloc_page(void)
1577{ 1619{
1578 struct trace_array_cpu *data; 1620 struct trace_array_cpu *data;
1579 void *array;
1580 struct page *page, *tmp; 1621 struct page *page, *tmp;
1581 LIST_HEAD(pages); 1622 LIST_HEAD(pages);
1623 void *array;
1582 int i; 1624 int i;
1583 1625
1584 /* first allocate a page for each CPU */ 1626 /* first allocate a page for each CPU */
@@ -1610,14 +1652,14 @@ static int trace_alloc_page(void)
1610 for_each_possible_cpu(i) { 1652 for_each_possible_cpu(i) {
1611 data = global_trace.data[i]; 1653 data = global_trace.data[i];
1612 page = list_entry(pages.next, struct page, lru); 1654 page = list_entry(pages.next, struct page, lru);
1613 list_del(&page->lru); 1655 list_del_init(&page->lru);
1614 list_add_tail(&page->lru, &data->trace_pages); 1656 list_add_tail(&page->lru, &data->trace_pages);
1615 ClearPageLRU(page); 1657 ClearPageLRU(page);
1616 1658
1617#ifdef CONFIG_TRACER_MAX_TRACE 1659#ifdef CONFIG_TRACER_MAX_TRACE
1618 data = max_tr.data[i]; 1660 data = max_tr.data[i];
1619 page = list_entry(pages.next, struct page, lru); 1661 page = list_entry(pages.next, struct page, lru);
1620 list_del(&page->lru); 1662 list_del_init(&page->lru);
1621 list_add_tail(&page->lru, &data->trace_pages); 1663 list_add_tail(&page->lru, &data->trace_pages);
1622 SetPageLRU(page); 1664 SetPageLRU(page);
1623#endif 1665#endif
@@ -1628,7 +1670,7 @@ static int trace_alloc_page(void)
1628 1670
1629 free_pages: 1671 free_pages:
1630 list_for_each_entry_safe(page, tmp, &pages, lru) { 1672 list_for_each_entry_safe(page, tmp, &pages, lru) {
1631 list_del(&page->lru); 1673 list_del_init(&page->lru);
1632 __free_page(page); 1674 __free_page(page);
1633 } 1675 }
1634 return -ENOMEM; 1676 return -ENOMEM;
@@ -1654,7 +1696,6 @@ __init static int tracer_alloc_buffers(void)
1654 "for trace buffer!\n"); 1696 "for trace buffer!\n");
1655 goto free_buffers; 1697 goto free_buffers;
1656 } 1698 }
1657 data->trace = array;
1658 1699
1659 /* set the array to the list */ 1700 /* set the array to the list */
1660 INIT_LIST_HEAD(&data->trace_pages); 1701 INIT_LIST_HEAD(&data->trace_pages);
@@ -1671,7 +1712,6 @@ __init static int tracer_alloc_buffers(void)
1671 "for trace buffer!\n"); 1712 "for trace buffer!\n");
1672 goto free_buffers; 1713 goto free_buffers;
1673 } 1714 }
1674 max_tr.data[i]->trace = array;
1675 1715
1676 INIT_LIST_HEAD(&max_tr.data[i]->trace_pages); 1716 INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
1677 page = virt_to_page(array); 1717 page = virt_to_page(array);
@@ -1716,24 +1756,22 @@ __init static int tracer_alloc_buffers(void)
1716 struct page *page, *tmp; 1756 struct page *page, *tmp;
1717 struct trace_array_cpu *data = global_trace.data[i]; 1757 struct trace_array_cpu *data = global_trace.data[i];
1718 1758
1719 if (data && data->trace) { 1759 if (data) {
1720 list_for_each_entry_safe(page, tmp, 1760 list_for_each_entry_safe(page, tmp,
1721 &data->trace_pages, lru) { 1761 &data->trace_pages, lru) {
1722 list_del(&page->lru); 1762 list_del_init(&page->lru);
1723 __free_page(page); 1763 __free_page(page);
1724 } 1764 }
1725 data->trace = NULL;
1726 } 1765 }
1727 1766
1728#ifdef CONFIG_TRACER_MAX_TRACE 1767#ifdef CONFIG_TRACER_MAX_TRACE
1729 data = max_tr.data[i]; 1768 data = max_tr.data[i];
1730 if (data && data->trace) { 1769 if (data) {
1731 list_for_each_entry_safe(page, tmp, 1770 list_for_each_entry_safe(page, tmp,
1732 &data->trace_pages, lru) { 1771 &data->trace_pages, lru) {
1733 list_del(&page->lru); 1772 list_del_init(&page->lru);
1734 __free_page(page); 1773 __free_page(page);
1735 } 1774 }
1736 data->trace = NULL;
1737 } 1775 }
1738#endif 1776#endif
1739 } 1777 }