Diffstat:
 include/linux/ftrace.h            |   2
 kernel/trace/ftrace.c             |   8
 kernel/trace/trace.c              | 144
 kernel/trace/trace.h              |   8
 kernel/trace/trace_irqsoff.c      |  32
 kernel/trace/trace_sched_wakeup.c |  18
 kernel/trace/trace_selftest.c     |  25
 7 files changed, 134 insertions(+), 103 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 2c1670c65236..953a36d6a199 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -69,7 +69,7 @@ extern void ftrace_caller(void);
 extern void ftrace_call(void);
 extern void mcount_call(void);
 #else
-# define ftrace_force_update() do { } while (0)
+# define ftrace_force_update() ({ 0; })
 #endif
 
 static inline void tracer_disable(void)
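
Note: with CONFIG_DYNAMIC_FTRACE set, ftrace_force_update() is a real function
returning int; the hunk above turns the stub for the non-dynamic case from a
do { } while (0) statement into a GNU C statement expression, so it too
evaluates to 0 and can sit in expression context. A minimal sketch of the
difference, with a hypothetical caller that is not part of this patch:

/* Sketch: a do/while stub is only a statement; a GNU C statement
 * expression ({ ... }) evaluates to the value of its last line. */
#define stub_stmt()     do { } while (0)        /* old form: no value */
#define stub_expr()     ({ 0; })                /* new form: yields 0 */

static int hypothetical_caller(void)
{
        /* return stub_stmt();  <- would not compile: void statement */
        return stub_expr();     /* fine: the whole expression is 0 */
}
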
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4facf5ceeb86..6d4d2e86debc 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1152,10 +1152,10 @@ static int __init notrace ftrace_dynamic_init(void)
 
 core_initcall(ftrace_dynamic_init);
 #else
-# define ftrace_startup() do { } while (0)
-# define ftrace_shutdown() do { } while (0)
-# define ftrace_startup_sysctl() do { } while (0)
-# define ftrace_shutdown_sysctl() do { } while (0)
+# define ftrace_startup() do { } while (0)
+# define ftrace_shutdown() do { } while (0)
+# define ftrace_startup_sysctl() do { } while (0)
+# define ftrace_shutdown_sysctl() do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 /**
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f6d026f17dbb..61d2f0228866 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -142,12 +142,59 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	tracing_record_cmdline(current);
 }
 
+void check_pages(struct trace_array_cpu *data)
+{
+	struct page *page, *tmp;
+
+	BUG_ON(data->trace_pages.next->prev != &data->trace_pages);
+	BUG_ON(data->trace_pages.prev->next != &data->trace_pages);
+
+	list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
+		BUG_ON(page->lru.next->prev != &page->lru);
+		BUG_ON(page->lru.prev->next != &page->lru);
+	}
+}
+
+void *head_page(struct trace_array_cpu *data)
+{
+	struct page *page;
+
+	check_pages(data);
+	if (list_empty(&data->trace_pages))
+		return NULL;
+
+	page = list_entry(data->trace_pages.next, struct page, lru);
+	BUG_ON(&page->lru == &data->trace_pages);
+
+	return page_address(page);
+}
+
+notrace static void
+flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
+{
+	struct list_head flip_pages;
+
+	INIT_LIST_HEAD(&flip_pages);
+
+	tr1->trace_current = NULL;
+	memcpy(&tr1->trace_current_idx, &tr2->trace_current_idx,
+		sizeof(struct trace_array_cpu) -
+		offsetof(struct trace_array_cpu, trace_current_idx));
+
+	check_pages(tr1);
+	check_pages(tr2);
+	list_splice_init(&tr1->trace_pages, &flip_pages);
+	list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
+	list_splice_init(&flip_pages, &tr2->trace_pages);
+	BUG_ON(!list_empty(&flip_pages));
+	check_pages(tr1);
+	check_pages(tr2);
+}
+
 notrace void
 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	struct trace_array_cpu *data;
-	void *save_trace;
-	struct list_head save_pages;
 	int i;
 
 	WARN_ON_ONCE(!irqs_disabled());
@@ -155,11 +202,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	/* clear out all the previous traces */
 	for_each_possible_cpu(i) {
 		data = tr->data[i];
-		save_trace = max_tr.data[i]->trace;
-		save_pages = max_tr.data[i]->trace_pages;
-		memcpy(max_tr.data[i], data, sizeof(*data));
-		data->trace = save_trace;
-		data->trace_pages = save_pages;
+		flip_trace(max_tr.data[i], data);
 		tracing_reset(data);
 	}
 
@@ -177,8 +220,6 @@ notrace void
 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	struct trace_array_cpu *data = tr->data[cpu];
-	void *save_trace;
-	struct list_head save_pages;
 	int i;
 
 	WARN_ON_ONCE(!irqs_disabled());
@@ -186,11 +227,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	for_each_possible_cpu(i)
 		tracing_reset(max_tr.data[i]);
 
-	save_trace = max_tr.data[cpu]->trace;
-	save_pages = max_tr.data[cpu]->trace_pages;
-	memcpy(max_tr.data[cpu], data, sizeof(*data));
-	data->trace = save_trace;
-	data->trace_pages = save_pages;
+	flip_trace(max_tr.data[cpu], data);
+
 	tracing_reset(data);
 
 	__update_max_tr(tr, tsk, cpu);
@@ -234,9 +272,9 @@ int register_tracer(struct tracer *type)
 	 * If we fail, we do not register this tracer.
 	 */
 	for_each_possible_cpu(i) {
-		if (!data->trace)
-			continue;
 		data = tr->data[i];
+		if (!head_page(data))
+			continue;
 		tracing_reset(data);
 	}
 	current_trace = type;
@@ -298,7 +336,7 @@ void unregister_tracer(struct tracer *type)
 void notrace tracing_reset(struct trace_array_cpu *data)
 {
 	data->trace_idx = 0;
-	data->trace_current = data->trace;
+	data->trace_current = head_page(data);
 	data->trace_current_idx = 0;
 }
 
@@ -425,26 +463,31 @@ notrace void tracing_record_cmdline(struct task_struct *tsk)
 }
 
 static inline notrace struct trace_entry *
-tracing_get_trace_entry(struct trace_array *tr,
-			struct trace_array_cpu *data)
+tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
 {
 	unsigned long idx, idx_next;
 	struct trace_entry *entry;
-	struct page *page;
 	struct list_head *next;
+	struct page *page;
 
 	data->trace_idx++;
 	idx = data->trace_current_idx;
 	idx_next = idx + 1;
 
+	BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);
+
 	entry = data->trace_current + idx * TRACE_ENTRY_SIZE;
 
 	if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
 		page = virt_to_page(data->trace_current);
-		if (unlikely(&page->lru == data->trace_pages.prev))
-			next = data->trace_pages.next;
-		else
-			next = page->lru.next;
+		/*
+		 * Roundrobin - but skip the head (which is not a real page):
+		 */
+		next = page->lru.next;
+		if (unlikely(next == &data->trace_pages))
+			next = next->next;
+		BUG_ON(next == &data->trace_pages);
+
 		page = list_entry(next, struct page, lru);
 		data->trace_current = page_address(page);
 		idx_next = 0;
@@ -456,18 +499,17 @@ tracing_get_trace_entry(struct trace_array *tr,
 }
 
 static inline notrace void
-tracing_generic_entry_update(struct trace_entry *entry,
-			     unsigned long flags)
+tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
 {
 	struct task_struct *tsk = current;
 	unsigned long pc;
 
 	pc = preempt_count();
 
 	entry->idx = atomic_inc_return(&tracer_counter);
 	entry->preempt_count = pc & 0xff;
 	entry->pid = tsk->pid;
 	entry->t = now(raw_smp_processor_id());
 	entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
 		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
@@ -476,16 +518,15 @@ tracing_generic_entry_update(struct trace_entry *entry,
 
 notrace void
 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
-       unsigned long ip, unsigned long parent_ip,
-       unsigned long flags)
+       unsigned long ip, unsigned long parent_ip, unsigned long flags)
 {
 	struct trace_entry *entry;
 
 	entry = tracing_get_trace_entry(tr, data);
 	tracing_generic_entry_update(entry, flags);
 	entry->type = TRACE_FN;
 	entry->fn.ip = ip;
 	entry->fn.parent_ip = parent_ip;
 }
 
 notrace void
@@ -496,7 +537,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 {
 	struct trace_entry *entry;
 
-	entry = tracing_get_trace_entry(tr, data);
+	entry = tracing_get_trace_entry(tr, data);
 	tracing_generic_entry_update(entry, flags);
 	entry->type = TRACE_CTX;
 	entry->ctx.prev_pid = prev->pid;
@@ -540,6 +581,8 @@ trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
 	}
 
 	page = list_entry(iter->next_page[cpu], struct page, lru);
+	BUG_ON(&data->trace_pages == &page->lru);
+
 	array = page_address(page);
 
 	return &array[iter->next_page_idx[cpu]];
@@ -554,7 +597,7 @@ find_next_entry(struct trace_iterator *iter, int *ent_cpu)
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		if (!tr->data[cpu]->trace)
+		if (!head_page(tr->data[cpu]))
 			continue;
 		ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
 		if (ent &&
@@ -762,7 +805,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 	name = type->name;
 
 	for_each_possible_cpu(cpu) {
-		if (tr->data[cpu]->trace) {
+		if (head_page(tr->data[cpu])) {
 			total += tr->data[cpu]->trace_idx;
 			if (tr->data[cpu]->trace_idx > tr->entries)
 				entries += tr->entries;
@@ -975,8 +1018,7 @@ static int trace_empty(struct trace_iterator *iter)
 	for_each_possible_cpu(cpu) {
 		data = iter->tr->data[cpu];
 
-		if (data->trace &&
-		    data->trace_idx)
+		if (head_page(data) && data->trace_idx)
 			return 0;
 	}
 	return 1;
@@ -1576,9 +1618,9 @@ static struct tracer no_tracer __read_mostly =
 static int trace_alloc_page(void)
 {
 	struct trace_array_cpu *data;
-	void *array;
 	struct page *page, *tmp;
 	LIST_HEAD(pages);
+	void *array;
 	int i;
 
 	/* first allocate a page for each CPU */
@@ -1610,14 +1652,14 @@ static int trace_alloc_page(void)
 	for_each_possible_cpu(i) {
 		data = global_trace.data[i];
 		page = list_entry(pages.next, struct page, lru);
-		list_del(&page->lru);
+		list_del_init(&page->lru);
 		list_add_tail(&page->lru, &data->trace_pages);
 		ClearPageLRU(page);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 		data = max_tr.data[i];
 		page = list_entry(pages.next, struct page, lru);
-		list_del(&page->lru);
+		list_del_init(&page->lru);
 		list_add_tail(&page->lru, &data->trace_pages);
 		SetPageLRU(page);
 #endif
@@ -1628,7 +1670,7 @@ static int trace_alloc_page(void)
 
  free_pages:
 	list_for_each_entry_safe(page, tmp, &pages, lru) {
-		list_del(&page->lru);
+		list_del_init(&page->lru);
 		__free_page(page);
 	}
 	return -ENOMEM;
@@ -1654,7 +1696,6 @@ __init static int tracer_alloc_buffers(void)
1654 "for trace buffer!\n"); 1696 "for trace buffer!\n");
1655 goto free_buffers; 1697 goto free_buffers;
1656 } 1698 }
1657 data->trace = array;
1658 1699
1659 /* set the array to the list */ 1700 /* set the array to the list */
1660 INIT_LIST_HEAD(&data->trace_pages); 1701 INIT_LIST_HEAD(&data->trace_pages);
@@ -1671,7 +1712,6 @@ __init static int tracer_alloc_buffers(void)
1671 "for trace buffer!\n"); 1712 "for trace buffer!\n");
1672 goto free_buffers; 1713 goto free_buffers;
1673 } 1714 }
1674 max_tr.data[i]->trace = array;
1675 1715
1676 INIT_LIST_HEAD(&max_tr.data[i]->trace_pages); 1716 INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
1677 page = virt_to_page(array); 1717 page = virt_to_page(array);
@@ -1716,24 +1756,22 @@ __init static int tracer_alloc_buffers(void)
 		struct page *page, *tmp;
 		struct trace_array_cpu *data = global_trace.data[i];
 
-		if (data && data->trace) {
+		if (data) {
 			list_for_each_entry_safe(page, tmp,
 						 &data->trace_pages, lru) {
-				list_del(&page->lru);
+				list_del_init(&page->lru);
 				__free_page(page);
 			}
-			data->trace = NULL;
 		}
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 		data = max_tr.data[i];
-		if (data && data->trace) {
+		if (data) {
 			list_for_each_entry_safe(page, tmp,
 						 &data->trace_pages, lru) {
-				list_del(&page->lru);
+				list_del_init(&page->lru);
 				__free_page(page);
 			}
-			data->trace = NULL;
 		}
 #endif
 	}
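
Note: flip_trace() above is the heart of this change. The old code in
update_max_tr() saved two pointers, memcpy()'d the whole per-CPU struct and
restored the pointers; that is unsafe once the buffer is a list of pages,
because a memcpy()'d list_head leaves its neighbours pointing at the old
structure. flip_trace() instead copies only the plain-state tail of the
struct (everything from trace_current_idx onward) and exchanges the page
lists with three list_splice_init() calls through an on-stack scratch head.
A reduced sketch of that splice-swap idiom, with simplified types that are
not the kernel structs:

/* Sketch: swap two lists whose heads are embedded in structs that
 * must themselves stay put. Reduced types, kernel list API. */
#include <linux/bug.h>
#include <linux/list.h>

struct buf {
        struct list_head pages;                 /* head never moves */
};

static void swap_page_lists(struct buf *a, struct buf *b)
{
        LIST_HEAD(tmp);                         /* on-stack scratch head */

        list_splice_init(&a->pages, &tmp);      /* a -> tmp, a left empty */
        list_splice_init(&b->pages, &a->pages); /* b -> a,   b left empty */
        list_splice_init(&tmp, &b->pages);      /* tmp -> b */
        BUG_ON(!list_empty(&tmp));              /* scratch must end empty */
}
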
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 88edbf1f6788..cc1d34b8b771 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -53,12 +53,12 @@ struct trace_entry {
  * the trace, etc.)
  */
 struct trace_array_cpu {
-	void *trace;
 	void *trace_current;
-	unsigned trace_current_idx;
 	struct list_head trace_pages;
-	unsigned long trace_idx;
 	atomic_t disabled;
+	/* these fields get copied into max-trace: */
+	unsigned trace_current_idx;
+	unsigned long trace_idx;
 	unsigned long saved_latency;
 	unsigned long critical_start;
 	unsigned long critical_end;
@@ -216,4 +216,6 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace,
 #endif
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
+extern void *head_page(struct trace_array_cpu *data);
+
 #endif /* _LINUX_KERNEL_TRACE_H */
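
Note: the field reordering above is what makes flip_trace()'s single memcpy()
legal: everything from trace_current_idx to the end of struct trace_array_cpu
is plain copyable state, while the pointer and list head before it must never
be overwritten wholesale. A small userspace sketch of such an
offsetof()-bounded tail copy, using an illustrative struct:

/* Sketch: copy only the tail of a struct, starting at one field. */
#include <stddef.h>
#include <string.h>

struct cpu_buf {
        void *current_page;     /* must NOT be blindly copied */
        /* fields from here on are plain state, safe to memcpy(): */
        unsigned current_idx;
        unsigned long count;
        unsigned long saved_latency;
};

static void copy_tail(struct cpu_buf *dst, const struct cpu_buf *src)
{
        memcpy(&dst->current_idx, &src->current_idx,
               sizeof(*dst) - offsetof(struct cpu_buf, current_idx));
}
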
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 14183b8f79c5..2dfebb67fdfb 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -144,7 +144,7 @@ check_critical_timing(struct trace_array *tr,
 	if (!report_latency(delta))
 		goto out;
 
-	spin_lock(&max_trace_lock);
+	spin_lock_irqsave(&max_trace_lock, flags);
 
 	/* check if we are still the max latency */
 	if (!report_latency(delta))
@@ -165,32 +165,24 @@ check_critical_timing(struct trace_array *tr,
 
 	update_max_tr_single(tr, current, cpu);
 
-	if (tracing_thresh)
-		printk(KERN_INFO "(%16s-%-5d|#%d): %lu us critical section "
-		       "violates %lu us threshold.\n"
-		       " => started at timestamp %lu: ",
+	if (tracing_thresh) {
+		printk(KERN_INFO "(%16s-%-5d|#%d):"
+		       " %lu us critical section violates %lu us threshold.\n",
 		       current->comm, current->pid,
 		       raw_smp_processor_id(),
-		       latency, nsecs_to_usecs(tracing_thresh), t0);
-	else
+		       latency, nsecs_to_usecs(tracing_thresh));
+	} else {
 		printk(KERN_INFO "(%16s-%-5d|#%d):"
-		       " new %lu us maximum-latency "
-		       "critical section.\n => started at timestamp %lu: ",
+		       " new %lu us maximum-latency critical section.\n",
 		       current->comm, current->pid,
 		       raw_smp_processor_id(),
-		       latency, t0);
-
-	print_symbol(KERN_CONT "<%s>\n", data->critical_start);
-	printk(KERN_CONT " => ended at timestamp %lu: ", t1);
-	print_symbol(KERN_CONT "<%s>\n", data->critical_end);
-	dump_stack();
-	t1 = nsecs_to_usecs(now(cpu));
-	printk(KERN_CONT " => dump-end timestamp %lu\n\n", t1);
+		       latency);
+	}
 
 	max_sequence++;
 
 out_unlock:
-	spin_unlock(&max_trace_lock);
+	spin_unlock_irqrestore(&max_trace_lock, flags);
 
 out:
 	data->critical_sequence = max_sequence;
@@ -216,7 +208,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 
-	if (unlikely(!data) || unlikely(!data->trace) ||
+	if (unlikely(!data) || unlikely(!head_page(data)) ||
 	    atomic_read(&data->disabled))
 		return;
 
@@ -256,7 +248,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 
-	if (unlikely(!data) || unlikely(!data->trace) ||
+	if (unlikely(!data) || unlikely(!head_page(data)) ||
 	    !data->critical_start || atomic_read(&data->disabled))
 		return;
 
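
Note: check_critical_timing() previously took max_trace_lock with a plain
spin_lock(); switching to the irqsave variant disables local interrupts and
remembers their previous state, which avoids the classic self-deadlock if the
same lock can ever be reached from interrupt context, and also matches
update_max_tr_single()'s expectation that it runs with interrupts off. The
general pattern, with illustrative lock and data rather than this file's
symbols:

/* Sketch: take a spinlock with local interrupts disabled and
 * restore the previous interrupt state on unlock. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_max;

static void record_latency(unsigned long delta)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);   /* irqs off, state saved */
        if (delta > example_max)
                example_max = delta;
        spin_unlock_irqrestore(&example_lock, flags); /* state restored */
}
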
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 3d10ff01f805..688df965f3f2 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -107,24 +107,18 @@ wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
 	update_max_tr(tr, wakeup_task, wakeup_cpu);
 
 	if (tracing_thresh) {
-		printk(KERN_INFO "(%16s-%-5d|#%d): %lu us wakeup latency "
-		       "violates %lu us threshold.\n"
-		       " => started at timestamp %lu: ",
+		printk(KERN_INFO "(%16s-%-5d|#%d):"
+		       " %lu us wakeup latency violates %lu us threshold.\n",
 		       wakeup_task->comm, wakeup_task->pid,
 		       raw_smp_processor_id(),
-		       latency, nsecs_to_usecs(tracing_thresh), t0);
+		       latency, nsecs_to_usecs(tracing_thresh));
 	} else {
-		printk(KERN_INFO "(%16s-%-5d|#%d): new %lu us maximum "
-		       "wakeup latency.\n => started at timestamp %lu: ",
+		printk(KERN_INFO "(%16s-%-5d|#%d):"
+		       " new %lu us maximum wakeup latency.\n",
 		       wakeup_task->comm, wakeup_task->pid,
-		       cpu, latency, t0);
+		       cpu, latency);
 	}
 
-	printk(KERN_CONT " ended at timestamp %lu: ", t1);
-	dump_stack();
-	t1 = nsecs_to_usecs(now(cpu));
-	printk(KERN_CONT " dump-end timestamp %lu\n\n", t1);
-
 out_unlock:
 	__wakeup_reset(tr);
 	spin_unlock_irqrestore(&wakeup_lock, flags);
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index ef4d3cc009f5..c01874c3b1f9 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1,6 +1,7 @@
 /* Include in trace.c */
 
 #include <linux/kthread.h>
+#include <linux/delay.h>
 
 static inline int trace_valid_entry(struct trace_entry *entry)
 {
@@ -15,28 +16,29 @@ static inline int trace_valid_entry(struct trace_entry *entry)
 static int
 trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
 {
-	struct page *page;
 	struct trace_entry *entries;
+	struct page *page;
 	int idx = 0;
 	int i;
 
+	BUG_ON(list_empty(&data->trace_pages));
 	page = list_entry(data->trace_pages.next, struct page, lru);
 	entries = page_address(page);
 
-	if (data->trace != entries)
+	if (head_page(data) != entries)
 		goto failed;
 
 	/*
 	 * The starting trace buffer always has valid elements,
-	 * if any element exits.
+	 * if any element exists.
 	 */
-	entries = data->trace;
+	entries = head_page(data);
 
 	for (i = 0; i < tr->entries; i++) {
 
-		if (i < data->trace_idx &&
-		    !trace_valid_entry(&entries[idx])) {
-			printk(KERN_CONT ".. invalid entry %d ", entries[idx].type);
+		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
+			printk(KERN_CONT ".. invalid entry %d ",
+				entries[idx].type);
 			goto failed;
 		}
 
@@ -80,11 +82,10 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	int ret = 0;
 
 	for_each_possible_cpu(cpu) {
-		if (!tr->data[cpu]->trace)
+		if (!head_page(tr->data[cpu]))
 			continue;
 
 		cnt += tr->data[cpu]->trace_idx;
-		printk("%d: count = %ld\n", cpu, cnt);
 
 		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
 		if (ret)
@@ -117,6 +118,8 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 	}
 
 	/* start the tracing */
+	ftrace_enabled = 1;
+
 	tr->ctrl = 1;
 	trace->init(tr);
 	/* Sleep for a 1/10 of a second */
@@ -124,6 +127,8 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 	/* stop the tracing. */
 	tr->ctrl = 0;
 	trace->ctrl_update(tr);
+	ftrace_enabled = 0;
+
 	/* check the trace buffer */
 	ret = trace_test_buffer(tr, &count);
 	trace->reset(tr);
@@ -328,7 +333,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 
 	/* create a high prio thread */
 	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
-	if (!IS_ERR(p)) {
+	if (IS_ERR(p)) {
 		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
 		return -1;
 	}
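
Note: the final hunk fixes an inverted error check. kthread_run() reports
failure through an ERR_PTR()-encoded pointer, so the failure branch must test
IS_ERR(p); the old !IS_ERR(p) made the selftest bail out precisely when
thread creation succeeded. The usual pattern in isolation, with an
illustrative thread function:

/* Sketch: correct kthread_run() error handling. */
#include <linux/err.h>
#include <linux/kthread.h>

static int demo_thread_fn(void *data)
{
        return 0;       /* thread body elided */
}

static int start_demo_thread(void)
{
        struct task_struct *p;

        p = kthread_run(demo_thread_fn, NULL, "demo-thread");
        if (IS_ERR(p))                  /* failure is ERR_PTR, not NULL */
                return PTR_ERR(p);      /* decode the negative errno */

        return 0;
}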