path: root/kernel/trace
author    Ingo Molnar <mingo@elte.hu>  2010-08-19 06:48:09 -0400
committer Ingo Molnar <mingo@elte.hu>  2010-08-19 06:48:09 -0400
commit    c8710ad38900153af7a3e6762e99c062cfa46443
tree      a0c0632274c4eb72f51e99a5861f71cffe65ea60
parent    6016ee13db518ab1cd0cbf43fc2ad5712021e338
parent    86397dc3ccfc0e17b7550d05eaf15fe91f6498dd

    Merge branch 'tip/perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
Diffstat (limited to 'kernel/trace')
 kernel/trace/Kconfig                 |  15
 kernel/trace/Makefile                |   3
 kernel/trace/blktrace.c              |  88
 kernel/trace/ring_buffer.c           |   3
 kernel/trace/trace.c                 |  62
 kernel/trace/trace.h                 |  19
 kernel/trace/trace_clock.c           |   2
 kernel/trace/trace_events.c          | 188
 kernel/trace/trace_functions_graph.c |  10
 kernel/trace/trace_kdb.c (new)       | 136
 10 files changed, 398 insertions(+), 128 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index c7683fd8a03..538501c6ea5 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -153,7 +153,7 @@ config IRQSOFF_TRACER
         bool "Interrupts-off Latency Tracer"
         default n
         depends on TRACE_IRQFLAGS_SUPPORT
-        depends on GENERIC_TIME
+        depends on !ARCH_USES_GETTIMEOFFSET
         select TRACE_IRQFLAGS
         select GENERIC_TRACER
         select TRACER_MAX_TRACE
@@ -175,7 +175,7 @@ config IRQSOFF_TRACER
 config PREEMPT_TRACER
         bool "Preemption-off Latency Tracer"
         default n
-        depends on GENERIC_TIME
+        depends on !ARCH_USES_GETTIMEOFFSET
         depends on PREEMPT
         select GENERIC_TRACER
         select TRACER_MAX_TRACE
@@ -323,17 +323,6 @@ config STACK_TRACER
 
           Say N if unsure.
 
-config WORKQUEUE_TRACER
-        bool "Trace workqueues"
-        select GENERIC_TRACER
-        help
-          The workqueue tracer provides some statistical information
-          about each cpu workqueue thread such as the number of the
-          works inserted and executed since their creation. It can help
-          to evaluate the amount of work each of them has to perform.
-          For example it can help a developer to decide whether he should
-          choose a per-cpu workqueue instead of a singlethreaded one.
-
 config BLK_DEV_IO_TRACE
         bool "Support for tracing block IO actions"
         depends on SYSFS
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 438e84a56ab..53f338190b2 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -53,5 +53,8 @@ endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
 obj-$(CONFIG_EVENT_TRACING) += power-traces.o
+ifeq ($(CONFIG_TRACING),y)
+obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
+endif
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 638711c1750..959f8d6c8cc 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -169,9 +169,12 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
                                  BLK_TC_ACT(BLK_TC_WRITE) };
 
+#define BLK_TC_HARDBARRIER      BLK_TC_BARRIER
+#define BLK_TC_RAHEAD           BLK_TC_AHEAD
+
 /* The ilog2() calls fall out because they're constant */
-#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
-          (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
+#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
+          (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
 
 /*
  * The worker for the various blk_add_trace*() types. Fills out a
@@ -194,9 +197,9 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
                 return;
 
         what |= ddir_act[rw & WRITE];
-        what |= MASK_TC_BIT(rw, BARRIER);
-        what |= MASK_TC_BIT(rw, SYNCIO);
-        what |= MASK_TC_BIT(rw, AHEAD);
+        what |= MASK_TC_BIT(rw, HARDBARRIER);
+        what |= MASK_TC_BIT(rw, SYNC);
+        what |= MASK_TC_BIT(rw, RAHEAD);
         what |= MASK_TC_BIT(rw, META);
         what |= MASK_TC_BIT(rw, DISCARD);
 
@@ -549,6 +552,41 @@ int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 }
 EXPORT_SYMBOL_GPL(blk_trace_setup);
 
+#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
+static int compat_blk_trace_setup(struct request_queue *q, char *name,
+                                  dev_t dev, struct block_device *bdev,
+                                  char __user *arg)
+{
+        struct blk_user_trace_setup buts;
+        struct compat_blk_user_trace_setup cbuts;
+        int ret;
+
+        if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
+                return -EFAULT;
+
+        buts = (struct blk_user_trace_setup) {
+                .act_mask = cbuts.act_mask,
+                .buf_size = cbuts.buf_size,
+                .buf_nr = cbuts.buf_nr,
+                .start_lba = cbuts.start_lba,
+                .end_lba = cbuts.end_lba,
+                .pid = cbuts.pid,
+        };
+        memcpy(&buts.name, &cbuts.name, 32);
+
+        ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
+        if (ret)
+                return ret;
+
+        if (copy_to_user(arg, &buts.name, 32)) {
+                blk_trace_remove(q);
+                return -EFAULT;
+        }
+
+        return 0;
+}
+#endif
+
 int blk_trace_startstop(struct request_queue *q, int start)
 {
         int ret;
@@ -601,6 +639,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
         if (!q)
                 return -ENXIO;
 
+        lock_kernel();
         mutex_lock(&bdev->bd_mutex);
 
         switch (cmd) {
@@ -608,6 +647,12 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
                 bdevname(bdev, b);
                 ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
                 break;
+#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
+        case BLKTRACESETUP32:
+                bdevname(bdev, b);
+                ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
+                break;
+#endif
         case BLKTRACESTART:
                 start = 1;
         case BLKTRACESTOP:
@@ -622,6 +667,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
         }
 
         mutex_unlock(&bdev->bd_mutex);
+        unlock_kernel();
         return ret;
 }
 
@@ -661,10 +707,13 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
         if (likely(!bt))
                 return;
 
-        if (blk_discard_rq(rq))
-                rw |= (1 << BIO_RW_DISCARD);
+        if (rq->cmd_flags & REQ_DISCARD)
+                rw |= REQ_DISCARD;
+
+        if (rq->cmd_flags & REQ_SECURE)
+                rw |= REQ_SECURE;
 
-        if (blk_pc_request(rq)) {
+        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                 what |= BLK_TC_ACT(BLK_TC_PC);
                 __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
                                 what, rq->errors, rq->cmd_len, rq->cmd);
@@ -925,7 +974,7 @@ void blk_add_driver_data(struct request_queue *q,
         if (likely(!bt))
                 return;
 
-        if (blk_pc_request(rq))
+        if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
                 __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
                                 BLK_TA_DRV_DATA, rq->errors, len, data);
         else
@@ -1730,7 +1779,7 @@ void blk_dump_cmd(char *buf, struct request *rq)
         int len = rq->cmd_len;
         unsigned char *cmd = rq->cmd;
 
-        if (!blk_pc_request(rq)) {
+        if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
                 buf[0] = '\0';
                 return;
         }
@@ -1755,21 +1804,23 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
 
         if (rw & WRITE)
                 rwbs[i++] = 'W';
-        else if (rw & 1 << BIO_RW_DISCARD)
+        else if (rw & REQ_DISCARD)
                 rwbs[i++] = 'D';
         else if (bytes)
                 rwbs[i++] = 'R';
         else
                 rwbs[i++] = 'N';
 
-        if (rw & 1 << BIO_RW_AHEAD)
+        if (rw & REQ_RAHEAD)
                 rwbs[i++] = 'A';
-        if (rw & 1 << BIO_RW_BARRIER)
+        if (rw & REQ_HARDBARRIER)
                 rwbs[i++] = 'B';
-        if (rw & 1 << BIO_RW_SYNCIO)
+        if (rw & REQ_SYNC)
                 rwbs[i++] = 'S';
-        if (rw & 1 << BIO_RW_META)
+        if (rw & REQ_META)
                 rwbs[i++] = 'M';
+        if (rw & REQ_SECURE)
+                rwbs[i++] = 'E';
 
         rwbs[i] = '\0';
 }
@@ -1779,8 +1830,11 @@ void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
         int rw = rq->cmd_flags & 0x03;
         int bytes;
 
-        if (blk_discard_rq(rq))
-                rw |= (1 << BIO_RW_DISCARD);
+        if (rq->cmd_flags & REQ_DISCARD)
+                rw |= REQ_DISCARD;
+
+        if (rq->cmd_flags & REQ_SECURE)
+                rw |= REQ_SECURE;
 
         bytes = blk_rq_bytes(rq);
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3632ce87674..19cccc3c302 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3846,6 +3846,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
                 rpos = reader->read;
                 pos += size;
 
+                if (rpos >= commit)
+                        break;
+
                 event = rb_reader_event(cpu_buffer);
                 size = rb_event_length(event);
         } while (len > size);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4b1122d0df3..9ec59f54115 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -101,10 +101,7 @@ static inline void ftrace_enable_cpu(void)
         preempt_enable();
 }
 
-static cpumask_var_t __read_mostly      tracing_buffer_mask;
-
-#define for_each_tracing_cpu(cpu)       \
-        for_each_cpu(cpu, tracing_buffer_mask)
+cpumask_var_t __read_mostly     tracing_buffer_mask;
 
 /*
  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
@@ -744,13 +741,6 @@ __acquires(kernel_lock)
                 return -1;
         }
 
-        /*
-         * When this gets called we hold the BKL which means that
-         * preemption is disabled. Various trace selftests however
-         * need to disable and enable preemption for successful tests.
-         * So we drop the BKL here and grab it after the tests again.
-         */
-        unlock_kernel();
         mutex_lock(&trace_types_lock);
 
         tracing_selftest_running = true;
@@ -832,7 +822,6 @@ __acquires(kernel_lock)
 #endif
 
  out_unlock:
-        lock_kernel();
         return ret;
 }
 
@@ -1493,11 +1482,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 }
 EXPORT_SYMBOL_GPL(trace_vprintk);
 
-enum trace_file_type {
-        TRACE_FILE_LAT_FMT      = 1,
-        TRACE_FILE_ANNOTATE     = 2,
-};
-
 static void trace_iterator_increment(struct trace_iterator *iter)
 {
         /* Don't allow ftrace to trace into the ring buffers */
@@ -1595,7 +1579,7 @@ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 }
 
 /* Find the next real entry, and increment the iterator to the next entry */
-static void *find_next_entry_inc(struct trace_iterator *iter)
+void *trace_find_next_entry_inc(struct trace_iterator *iter)
 {
         iter->ent = __find_next_entry(iter, &iter->cpu,
                                       &iter->lost_events, &iter->ts);
@@ -1630,19 +1614,19 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
                 return NULL;
 
         if (iter->idx < 0)
-                ent = find_next_entry_inc(iter);
+                ent = trace_find_next_entry_inc(iter);
         else
                 ent = iter;
 
         while (ent && iter->idx < i)
-                ent = find_next_entry_inc(iter);
+                ent = trace_find_next_entry_inc(iter);
 
         iter->pos = *pos;
 
         return ent;
 }
 
-static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
+void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 {
         struct trace_array *tr = iter->tr;
         struct ring_buffer_event *event;
@@ -2003,7 +1987,7 @@ int trace_empty(struct trace_iterator *iter)
 }
 
 /* Called with trace_event_read_lock() held. */
-static enum print_line_t print_trace_line(struct trace_iterator *iter)
+enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
         enum print_line_t ret;
 
@@ -3193,7 +3177,7 @@ waitagain:
 
         trace_event_read_lock();
         trace_access_lock(iter->cpu_file);
-        while (find_next_entry_inc(iter) != NULL) {
+        while (trace_find_next_entry_inc(iter) != NULL) {
                 enum print_line_t ret;
                 int len = iter->seq.len;
 
@@ -3276,7 +3260,7 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
                 if (ret != TRACE_TYPE_NO_CONSUME)
                         trace_consume(iter);
                 rem -= count;
-                if (!find_next_entry_inc(iter)) {
+                if (!trace_find_next_entry_inc(iter)) {
                         rem = 0;
                         iter->ent = NULL;
                         break;
@@ -3332,7 +3316,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
         if (ret <= 0)
                 goto out_err;
 
-        if (!iter->ent && !find_next_entry_inc(iter)) {
+        if (!iter->ent && !trace_find_next_entry_inc(iter)) {
                 ret = -EFAULT;
                 goto out_err;
         }
@@ -3479,6 +3463,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
                                         size_t cnt, loff_t *fpos)
 {
         char *buf;
+        size_t written;
 
         if (tracing_disabled)
                 return -EINVAL;
@@ -3500,11 +3485,15 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
         } else
                 buf[cnt] = '\0';
 
-        cnt = mark_printk("%s", buf);
+        written = mark_printk("%s", buf);
         kfree(buf);
-        *fpos += cnt;
+        *fpos += written;
 
-        return cnt;
+        /* don't tell userspace we wrote more - it might confuse them */
+        if (written > cnt)
+                written = cnt;
+
+        return written;
 }
 
 static int tracing_clock_show(struct seq_file *m, void *v)
@@ -4402,7 +4391,7 @@ static struct notifier_block trace_die_notifier = {
  */
 #define KERN_TRACE              KERN_EMERG
 
-static void
+void
 trace_printk_seq(struct trace_seq *s)
 {
         /* Probably should print a warning here. */
@@ -4417,6 +4406,13 @@ trace_printk_seq(struct trace_seq *s)
         trace_seq_init(s);
 }
 
+void trace_init_global_iter(struct trace_iterator *iter)
+{
+        iter->tr = &global_trace;
+        iter->trace = current_trace;
+        iter->cpu_file = TRACE_PIPE_ALL_CPU;
+}
+
 static void
 __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 {
@@ -4442,8 +4438,10 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
         if (disable_tracing)
                 ftrace_kill();
 
+        trace_init_global_iter(&iter);
+
         for_each_tracing_cpu(cpu) {
-                atomic_inc(&global_trace.data[cpu]->disabled);
+                atomic_inc(&iter.tr->data[cpu]->disabled);
         }
 
         old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
@@ -4492,7 +4490,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
         iter.iter_flags |= TRACE_FILE_LAT_FMT;
         iter.pos = -1;
 
-        if (find_next_entry_inc(&iter) != NULL) {
+        if (trace_find_next_entry_inc(&iter) != NULL) {
                 int ret;
 
                 ret = print_trace_line(&iter);
@@ -4514,7 +4512,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
         trace_flags |= old_userobj;
 
         for_each_tracing_cpu(cpu) {
-                atomic_dec(&global_trace.data[cpu]->disabled);
+                atomic_dec(&iter.tr->data[cpu]->disabled);
         }
         tracing_on();
 }
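
The tracing_mark_write() fix above clamps the value reported to userspace at the byte count the caller passed in, because a write(2) that claims to have written more bytes than were submitted breaks the standard partial-write loop. A userspace-side sketch of such a loop (the debugfs path is the conventional mount point and is an assumption here; adjust as needed):

    /* Looped writer that would misbehave if the kernel ever returned
     * more than `remaining` from write(2). */
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* assumed path: debugfs mounted at /sys/kernel/debug */
            int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
            const char *msg = "hello from userspace\n";
            const char *p = msg;
            size_t remaining = strlen(msg);

            if (fd < 0)
                    return 1;
            while (remaining > 0) {
                    ssize_t n = write(fd, p, remaining);

                    if (n < 0)
                            break;
                    p += n;             /* a return > remaining overruns msg */
                    remaining -= n;     /* ...and underflows this counter   */
            }
            close(fd);
            return 0;
    }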
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d05c873dd4b..d39b3c5454a 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -314,6 +314,14 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
                                           int *ent_cpu, u64 *ent_ts);
 
+int trace_empty(struct trace_iterator *iter);
+
+void *trace_find_next_entry_inc(struct trace_iterator *iter);
+
+void trace_init_global_iter(struct trace_iterator *iter);
+
+void tracing_iter_reset(struct trace_iterator *iter, int cpu);
+
 void default_wait_pipe(struct trace_iterator *iter);
 void poll_wait_pipe(struct trace_iterator *iter);
 
@@ -351,6 +359,15 @@ void tracing_start_sched_switch_record(void);
 int register_tracer(struct tracer *type);
 void unregister_tracer(struct tracer *type);
 int is_tracing_stopped(void);
+enum trace_file_type {
+        TRACE_FILE_LAT_FMT      = 1,
+        TRACE_FILE_ANNOTATE     = 2,
+};
+
+extern cpumask_var_t __read_mostly tracing_buffer_mask;
+
+#define for_each_tracing_cpu(cpu)       \
+        for_each_cpu(cpu, tracing_buffer_mask)
 
 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
 
@@ -436,6 +453,8 @@ trace_array_vprintk(struct trace_array *tr,
                     unsigned long ip, const char *fmt, va_list args);
 int trace_array_printk(struct trace_array *tr,
                        unsigned long ip, const char *fmt, ...);
+void trace_printk_seq(struct trace_seq *s);
+enum print_line_t print_trace_line(struct trace_iterator *iter);
 
 extern unsigned long trace_flags;
 
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 52fda6c04ac..685a67d55db 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -55,7 +55,7 @@ u64 notrace trace_clock_local(void)
  */
 u64 notrace trace_clock(void)
 {
-        return cpu_clock(raw_smp_processor_id());
+        return local_clock();
 }
 
 
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 09b4fa6e4d3..398c0e8b332 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -598,88 +598,146 @@ out:
         return ret;
 }
 
-static void print_event_fields(struct trace_seq *s, struct list_head *head)
+enum {
+        FORMAT_HEADER           = 1,
+        FORMAT_FIELD_SEPERATOR  = 2,
+        FORMAT_PRINTFMT         = 3,
+};
+
+static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 {
+        struct ftrace_event_call *call = m->private;
         struct ftrace_event_field *field;
+        struct list_head *common_head = &ftrace_common_fields;
+        struct list_head *head = trace_get_fields(call);
 
-        list_for_each_entry_reverse(field, head, link) {
-                /*
-                 * Smartly shows the array type(except dynamic array).
-                 * Normal:
-                 *      field:TYPE VAR
-                 * If TYPE := TYPE[LEN], it is shown:
-                 *      field:TYPE VAR[LEN]
-                 */
-                const char *array_descriptor = strchr(field->type, '[');
+        (*pos)++;
 
-                if (!strncmp(field->type, "__data_loc", 10))
-                        array_descriptor = NULL;
+        switch ((unsigned long)v) {
+        case FORMAT_HEADER:
+                if (unlikely(list_empty(common_head)))
+                        return NULL;
 
-                if (!array_descriptor) {
-                        trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
-                                        "\tsize:%u;\tsigned:%d;\n",
-                                        field->type, field->name, field->offset,
-                                        field->size, !!field->is_signed);
-                } else {
-                        trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
-                                        "\tsize:%u;\tsigned:%d;\n",
-                                        (int)(array_descriptor - field->type),
-                                        field->type, field->name,
-                                        array_descriptor, field->offset,
-                                        field->size, !!field->is_signed);
-                }
+                field = list_entry(common_head->prev,
+                                   struct ftrace_event_field, link);
+                return field;
+
+        case FORMAT_FIELD_SEPERATOR:
+                if (unlikely(list_empty(head)))
+                        return NULL;
+
+                field = list_entry(head->prev, struct ftrace_event_field, link);
+                return field;
+
+        case FORMAT_PRINTFMT:
+                /* all done */
+                return NULL;
         }
+
+        field = v;
+        if (field->link.prev == common_head)
+                return (void *)FORMAT_FIELD_SEPERATOR;
+        else if (field->link.prev == head)
+                return (void *)FORMAT_PRINTFMT;
+
+        field = list_entry(field->link.prev, struct ftrace_event_field, link);
+
+        return field;
 }
 
-static ssize_t
-event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
-                  loff_t *ppos)
+static void *f_start(struct seq_file *m, loff_t *pos)
 {
-        struct ftrace_event_call *call = filp->private_data;
-        struct list_head *head;
-        struct trace_seq *s;
-        char *buf;
-        int r;
+        loff_t l = 0;
+        void *p;
 
-        if (*ppos)
+        /* Start by showing the header */
+        if (!*pos)
+                return (void *)FORMAT_HEADER;
+
+        p = (void *)FORMAT_HEADER;
+        do {
+                p = f_next(m, p, &l);
+        } while (p && l < *pos);
+
+        return p;
+}
+
+static int f_show(struct seq_file *m, void *v)
+{
+        struct ftrace_event_call *call = m->private;
+        struct ftrace_event_field *field;
+        const char *array_descriptor;
+
+        switch ((unsigned long)v) {
+        case FORMAT_HEADER:
+                seq_printf(m, "name: %s\n", call->name);
+                seq_printf(m, "ID: %d\n", call->event.type);
+                seq_printf(m, "format:\n");
                 return 0;
 
-        s = kmalloc(sizeof(*s), GFP_KERNEL);
-        if (!s)
-                return -ENOMEM;
+        case FORMAT_FIELD_SEPERATOR:
+                seq_putc(m, '\n');
+                return 0;
 
-        trace_seq_init(s);
+        case FORMAT_PRINTFMT:
+                seq_printf(m, "\nprint fmt: %s\n",
+                           call->print_fmt);
+                return 0;
+        }
 
-        trace_seq_printf(s, "name: %s\n", call->name);
-        trace_seq_printf(s, "ID: %d\n", call->event.type);
-        trace_seq_printf(s, "format:\n");
+        field = v;
 
-        /* print common fields */
-        print_event_fields(s, &ftrace_common_fields);
+        /*
+         * Smartly shows the array type(except dynamic array).
+         * Normal:
+         *      field:TYPE VAR
+         * If TYPE := TYPE[LEN], it is shown:
+         *      field:TYPE VAR[LEN]
+         */
+        array_descriptor = strchr(field->type, '[');
 
-        trace_seq_putc(s, '\n');
+        if (!strncmp(field->type, "__data_loc", 10))
+                array_descriptor = NULL;
 
-        /* print event specific fields */
-        head = trace_get_fields(call);
-        print_event_fields(s, head);
+        if (!array_descriptor)
+                seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+                           field->type, field->name, field->offset,
+                           field->size, !!field->is_signed);
+        else
+                seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+                           (int)(array_descriptor - field->type),
+                           field->type, field->name,
+                           array_descriptor, field->offset,
+                           field->size, !!field->is_signed);
 
-        r = trace_seq_printf(s, "\nprint fmt: %s\n", call->print_fmt);
+        return 0;
+}
 
-        if (!r) {
-                /*
-                 * ug! The format output is bigger than a PAGE!!
-                 */
-                buf = "FORMAT TOO BIG\n";
-                r = simple_read_from_buffer(ubuf, cnt, ppos,
-                                            buf, strlen(buf));
-                goto out;
-        }
+static void f_stop(struct seq_file *m, void *p)
+{
+}
 
-        r = simple_read_from_buffer(ubuf, cnt, ppos,
-                                    s->buffer, s->len);
- out:
-        kfree(s);
-        return r;
+static const struct seq_operations trace_format_seq_ops = {
+        .start          = f_start,
+        .next           = f_next,
+        .stop           = f_stop,
+        .show           = f_show,
+};
+
+static int trace_format_open(struct inode *inode, struct file *file)
+{
+        struct ftrace_event_call *call = inode->i_private;
+        struct seq_file *m;
+        int ret;
+
+        ret = seq_open(file, &trace_format_seq_ops);
+        if (ret < 0)
+                return ret;
+
+        m = file->private_data;
+        m->private = call;
+
+        return 0;
 }
 
 static ssize_t
@@ -877,8 +935,10 @@ static const struct file_operations ftrace_enable_fops = {
 };
 
 static const struct file_operations ftrace_event_format_fops = {
-        .open = tracing_open_generic,
-        .read = event_format_read,
+        .open = trace_format_open,
+        .read = seq_read,
+        .llseek = seq_lseek,
+        .release = seq_release,
 };
 
 static const struct file_operations ftrace_event_id_fops = {
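
The format-file conversion above is the stock seq_file pattern: seq_read() repeatedly drives start/next/show/stop, so output is produced one record at a time instead of being preassembled in a buffer that could outgrow a page (the old "FORMAT TOO BIG" failure). A minimal userspace model of that iteration contract, standing in for the kernel API (the tokens play the role of FORMAT_HEADER, the field lists, and FORMAT_PRINTFMT):

    /* Model of seq_file-style iteration: start() positions at *pos,
     * next() advances it, show() emits one record. */
    #include <stdio.h>

    static const char *tokens[] = { "header", "field A", "field B", "print fmt" };
    #define NTOKENS 4

    static const void *f_start(long long *pos)
    {
            return (*pos < NTOKENS) ? &tokens[*pos] : NULL;
    }

    static const void *f_next(const void *v, long long *pos)
    {
            (void)v;                /* real f_next() inspects the cursor */
            (*pos)++;
            return (*pos < NTOKENS) ? &tokens[*pos] : NULL;
    }

    static void f_show(const void *v)
    {
            printf("%s\n", *(const char * const *)v);
    }

    int main(void)
    {
            long long pos = 0;
            const void *v;

            /* seq_read() drives this loop, resuming from wherever the
             * previous read() stopped - no one-shot page-sized buffer */
            for (v = f_start(&pos); v; v = f_next(v, &pos))
                    f_show(v);
            return 0;
    }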
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index fcb5a542cd2..c93bcb24863 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -507,7 +507,15 @@ get_return_for_leaf(struct trace_iterator *iter,
                  * if the output fails.
                  */
                 data->ent = *curr;
-                data->ret = *next;
+                /*
+                 * If the next event is not a return type, then
+                 * we only care about what type it is. Otherwise we can
+                 * safely copy the entire event.
+                 */
+                if (next->ent.type == TRACE_GRAPH_RET)
+                        data->ret = *next;
+                else
+                        data->ret.ent.type = next->ent.type;
         }
 }
 
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
new file mode 100644
index 00000000000..7b8ecd751d9
--- /dev/null
+++ b/kernel/trace/trace_kdb.c
@@ -0,0 +1,136 @@
+/*
+ * kdb helper for dumping the ftrace buffer
+ *
+ * Copyright (C) 2010 Jason Wessel <jason.wessel@windriver.com>
+ *
+ * ftrace_dump_buf based on ftrace_dump:
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
+ *
+ */
+#include <linux/init.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/ftrace.h>
+
+#include "../debug/kdb/kdb_private.h"
+#include "trace.h"
+#include "trace_output.h"
+
+static void ftrace_dump_buf(int skip_lines, long cpu_file)
+{
+        /* use static because iter can be a bit big for the stack */
+        static struct trace_iterator iter;
+        unsigned int old_userobj;
+        int cnt = 0, cpu;
+
+        trace_init_global_iter(&iter);
+
+        for_each_tracing_cpu(cpu) {
+                atomic_inc(&iter.tr->data[cpu]->disabled);
+        }
+
+        old_userobj = trace_flags;
+
+        /* don't look at user memory in panic mode */
+        trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
+
+        kdb_printf("Dumping ftrace buffer:\n");
+
+        /* reset all but tr, trace, and overruns */
+        memset(&iter.seq, 0,
+                   sizeof(struct trace_iterator) -
+                   offsetof(struct trace_iterator, seq));
+        iter.iter_flags |= TRACE_FILE_LAT_FMT;
+        iter.pos = -1;
+
+        if (cpu_file == TRACE_PIPE_ALL_CPU) {
+                for_each_tracing_cpu(cpu) {
+                        iter.buffer_iter[cpu] =
+                        ring_buffer_read_prepare(iter.tr->buffer, cpu);
+                        ring_buffer_read_start(iter.buffer_iter[cpu]);
+                        tracing_iter_reset(&iter, cpu);
+                }
+        } else {
+                iter.cpu_file = cpu_file;
+                iter.buffer_iter[cpu_file] =
+                        ring_buffer_read_prepare(iter.tr->buffer, cpu_file);
+                ring_buffer_read_start(iter.buffer_iter[cpu_file]);
+                tracing_iter_reset(&iter, cpu_file);
+        }
+        if (!trace_empty(&iter))
+                trace_find_next_entry_inc(&iter);
+        while (!trace_empty(&iter)) {
+                if (!cnt)
+                        kdb_printf("---------------------------------\n");
+                cnt++;
+
+                if (trace_find_next_entry_inc(&iter) != NULL && !skip_lines)
+                        print_trace_line(&iter);
+                if (!skip_lines)
+                        trace_printk_seq(&iter.seq);
+                else
+                        skip_lines--;
+                if (KDB_FLAG(CMD_INTERRUPT))
+                        goto out;
+        }
+
+        if (!cnt)
+                kdb_printf("   (ftrace buffer empty)\n");
+        else
+                kdb_printf("---------------------------------\n");
+
+out:
+        trace_flags = old_userobj;
+
+        for_each_tracing_cpu(cpu) {
+                atomic_dec(&iter.tr->data[cpu]->disabled);
+        }
+
+        for_each_tracing_cpu(cpu)
+                if (iter.buffer_iter[cpu])
+                        ring_buffer_read_finish(iter.buffer_iter[cpu]);
+}
+
+/*
+ * kdb_ftdump - Dump the ftrace log buffer
+ */
+static int kdb_ftdump(int argc, const char **argv)
+{
+        int skip_lines = 0;
+        long cpu_file;
+        char *cp;
+
+        if (argc > 2)
+                return KDB_ARGCOUNT;
+
+        if (argc) {
+                skip_lines = simple_strtol(argv[1], &cp, 0);
+                if (*cp)
+                        skip_lines = 0;
+        }
+
+        if (argc == 2) {
+                cpu_file = simple_strtol(argv[2], &cp, 0);
+                if (*cp || cpu_file >= NR_CPUS || cpu_file < 0 ||
+                    !cpu_online(cpu_file))
+                        return KDB_BADINT;
+        } else {
+                cpu_file = TRACE_PIPE_ALL_CPU;
+        }
+
+        kdb_trap_printk++;
+        ftrace_dump_buf(skip_lines, cpu_file);
+        kdb_trap_printk--;
+
+        return 0;
+}
+
+static __init int kdb_ftrace_register(void)
+{
+        kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
+                            "Dump ftrace log", 0, KDB_REPEAT_NONE);
+        return 0;
+}
+
+late_initcall(kdb_ftrace_register);
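
As registered above, ftdump takes an optional number of leading lines to skip and an optional CPU, and is available only when both CONFIG_KGDB_KDB and CONFIG_TRACING are set (per the Makefile hunk earlier). A hypothetical kdb session — the banner lines follow the kdb_printf() calls in ftrace_dump_buf(), while the trace output itself is illustrative only:

    kdb> ftdump 0 1
    Dumping ftrace buffer:
    ---------------------------------
    ... trace lines for CPU 1, none skipped ...
    ---------------------------------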