Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig         15
-rw-r--r--  kernel/trace/Makefile         3
-rw-r--r--  kernel/trace/blktrace.c      88
-rw-r--r--  kernel/trace/trace.c         51
-rw-r--r--  kernel/trace/trace.h         19
-rw-r--r--  kernel/trace/trace_clock.c    2
-rw-r--r--  kernel/trace/trace_kdb.c    136
7 files changed, 254 insertions, 60 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index c7683fd8a03a..538501c6ea50 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -153,7 +153,7 @@ config IRQSOFF_TRACER
 	bool "Interrupts-off Latency Tracer"
 	default n
 	depends on TRACE_IRQFLAGS_SUPPORT
-	depends on GENERIC_TIME
+	depends on !ARCH_USES_GETTIMEOFFSET
 	select TRACE_IRQFLAGS
 	select GENERIC_TRACER
 	select TRACER_MAX_TRACE
@@ -175,7 +175,7 @@ config IRQSOFF_TRACER
 config PREEMPT_TRACER
 	bool "Preemption-off Latency Tracer"
 	default n
-	depends on GENERIC_TIME
+	depends on !ARCH_USES_GETTIMEOFFSET
 	depends on PREEMPT
 	select GENERIC_TRACER
 	select TRACER_MAX_TRACE
@@ -323,17 +323,6 @@ config STACK_TRACER
 
 	  Say N if unsure.
 
-config WORKQUEUE_TRACER
-	bool "Trace workqueues"
-	select GENERIC_TRACER
-	help
-	  The workqueue tracer provides some statistical information
-	  about each cpu workqueue thread such as the number of the
-	  works inserted and executed since their creation. It can help
-	  to evaluate the amount of work each of them has to perform.
-	  For example it can help a developer to decide whether he should
-	  choose a per-cpu workqueue instead of a singlethreaded one.
-
 config BLK_DEV_IO_TRACE
 	bool "Support for tracing block IO actions"
 	depends on SYSFS
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 438e84a56ab3..53f338190b26 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -53,5 +53,8 @@ endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
 obj-$(CONFIG_EVENT_TRACING) += power-traces.o
+ifeq ($(CONFIG_TRACING),y)
+obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
+endif
 
 libftrace-y := ftrace.o
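
Note: the ifeq wrapper is load-bearing. trace_kdb.o (added below) calls into the tracing core, so gating it on CONFIG_KGDB_KDB alone would break kernels built with KDB enabled but CONFIG_TRACING off; the guard makes the object require both options. A stand-alone GNU make sketch of the effect — the hard-coded CONFIG values are hypothetical stand-ins, not a real kbuild fragment, and the recipe line must start with a tab:

    # Flip either switch off and obj-y comes out empty.
    CONFIG_TRACING  := y
    CONFIG_KGDB_KDB := y

    ifeq ($(CONFIG_TRACING),y)
    obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
    endif

    all:
    	@echo 'obj-y = $(obj-y)'

With both switches set to y this prints "obj-y = trace_kdb.o"; with CONFIG_TRACING unset the ifeq body is skipped entirely, and with only CONFIG_KGDB_KDB unset the object lands on the obj- list, which kbuild ignores.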
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 638711c17504..959f8d6c8cc1 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -169,9 +169,12 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
 				 BLK_TC_ACT(BLK_TC_WRITE) };
 
+#define BLK_TC_HARDBARRIER	BLK_TC_BARRIER
+#define BLK_TC_RAHEAD		BLK_TC_AHEAD
+
 /* The ilog2() calls fall out because they're constant */
-#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
-	(ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
+#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
+	(ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
 
 /*
  * The worker for the various blk_add_trace*() types. Fills out a
@@ -194,9 +197,9 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 		return;
 
 	what |= ddir_act[rw & WRITE];
-	what |= MASK_TC_BIT(rw, BARRIER);
-	what |= MASK_TC_BIT(rw, SYNCIO);
-	what |= MASK_TC_BIT(rw, AHEAD);
+	what |= MASK_TC_BIT(rw, HARDBARRIER);
+	what |= MASK_TC_BIT(rw, SYNC);
+	what |= MASK_TC_BIT(rw, RAHEAD);
 	what |= MASK_TC_BIT(rw, META);
 	what |= MASK_TC_BIT(rw, DISCARD);
 
@@ -549,6 +552,41 @@ int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 }
 EXPORT_SYMBOL_GPL(blk_trace_setup);
 
+#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
+static int compat_blk_trace_setup(struct request_queue *q, char *name,
+				  dev_t dev, struct block_device *bdev,
+				  char __user *arg)
+{
+	struct blk_user_trace_setup buts;
+	struct compat_blk_user_trace_setup cbuts;
+	int ret;
+
+	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
+		return -EFAULT;
+
+	buts = (struct blk_user_trace_setup) {
+		.act_mask = cbuts.act_mask,
+		.buf_size = cbuts.buf_size,
+		.buf_nr = cbuts.buf_nr,
+		.start_lba = cbuts.start_lba,
+		.end_lba = cbuts.end_lba,
+		.pid = cbuts.pid,
+	};
+	memcpy(&buts.name, &cbuts.name, 32);
+
+	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
+	if (ret)
+		return ret;
+
+	if (copy_to_user(arg, &buts.name, 32)) {
+		blk_trace_remove(q);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+#endif
+
 int blk_trace_startstop(struct request_queue *q, int start)
 {
 	int ret;
@@ -601,6 +639,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 	if (!q)
 		return -ENXIO;
 
+	lock_kernel();
 	mutex_lock(&bdev->bd_mutex);
 
 	switch (cmd) {
@@ -608,6 +647,12 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 		bdevname(bdev, b);
 		ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
 		break;
+#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
+	case BLKTRACESETUP32:
+		bdevname(bdev, b);
+		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
+		break;
+#endif
 	case BLKTRACESTART:
 		start = 1;
 	case BLKTRACESTOP:
@@ -622,6 +667,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 	}
 
 	mutex_unlock(&bdev->bd_mutex);
+	unlock_kernel();
 	return ret;
 }
 
@@ -661,10 +707,13 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 	if (likely(!bt))
 		return;
 
-	if (blk_discard_rq(rq))
-		rw |= (1 << BIO_RW_DISCARD);
+	if (rq->cmd_flags & REQ_DISCARD)
+		rw |= REQ_DISCARD;
+
+	if (rq->cmd_flags & REQ_SECURE)
+		rw |= REQ_SECURE;
 
-	if (blk_pc_request(rq)) {
+	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		what |= BLK_TC_ACT(BLK_TC_PC);
 		__blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
 				what, rq->errors, rq->cmd_len, rq->cmd);
@@ -925,7 +974,7 @@ void blk_add_driver_data(struct request_queue *q,
 	if (likely(!bt))
 		return;
 
-	if (blk_pc_request(rq))
+	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
 		__blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
 				BLK_TA_DRV_DATA, rq->errors, len, data);
 	else
@@ -1730,7 +1779,7 @@ void blk_dump_cmd(char *buf, struct request *rq)
 	int len = rq->cmd_len;
 	unsigned char *cmd = rq->cmd;
 
-	if (!blk_pc_request(rq)) {
+	if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
 		buf[0] = '\0';
 		return;
 	}
@@ -1755,21 +1804,23 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
 
 	if (rw & WRITE)
 		rwbs[i++] = 'W';
-	else if (rw & 1 << BIO_RW_DISCARD)
+	else if (rw & REQ_DISCARD)
 		rwbs[i++] = 'D';
 	else if (bytes)
 		rwbs[i++] = 'R';
 	else
 		rwbs[i++] = 'N';
 
-	if (rw & 1 << BIO_RW_AHEAD)
+	if (rw & REQ_RAHEAD)
 		rwbs[i++] = 'A';
-	if (rw & 1 << BIO_RW_BARRIER)
+	if (rw & REQ_HARDBARRIER)
 		rwbs[i++] = 'B';
-	if (rw & 1 << BIO_RW_SYNCIO)
+	if (rw & REQ_SYNC)
 		rwbs[i++] = 'S';
-	if (rw & 1 << BIO_RW_META)
+	if (rw & REQ_META)
 		rwbs[i++] = 'M';
+	if (rw & REQ_SECURE)
+		rwbs[i++] = 'E';
 
 	rwbs[i] = '\0';
 }
@@ -1779,8 +1830,11 @@ void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
 	int rw = rq->cmd_flags & 0x03;
 	int bytes;
 
-	if (blk_discard_rq(rq))
-		rw |= (1 << BIO_RW_DISCARD);
+	if (rq->cmd_flags & REQ_DISCARD)
+		rw |= REQ_DISCARD;
+
+	if (rq->cmd_flags & REQ_SECURE)
+		rw |= REQ_SECURE;
 
 	bytes = blk_rq_bytes(rq);
 
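
Note: the reworked MASK_TC_BIT keeps the original single-shift trick — isolate one request-flag bit, then slide it up to the matching BLK_TC_* category position above BLK_TC_SHIFT; only the source bit-space changes from BIO_RW_* to __REQ_*, with the two #define aliases covering the categories whose names differ. A minimal user-space sketch of the arithmetic, using toy constants in place of the kernel's (the real values live in the block headers):

    #include <assert.h>
    #include <stdint.h>

    #define DEMO_SHIFT   16            /* stands in for BLK_TC_SHIFT */
    #define SRC_BIT      3             /* stands in for __REQ_SYNC   */
    #define DST_CATEGORY (1u << 1)     /* stands in for BLK_TC_SYNC  */

    /* Relocate the single bit at SRC_BIT to the category's position
     * above DEMO_SHIFT: ilog2(DST_CATEGORY) is 1, so the net shift
     * is 1 + 16 - 3 = 14, computable at compile time. */
    #define MASK_BIT(rw) (((rw) & (1u << SRC_BIT)) << (1 + DEMO_SHIFT - SRC_BIT))

    int main(void)
    {
            uint32_t rw = 1u << SRC_BIT;            /* flag set */

            assert(MASK_BIT(rw) == (DST_CATEGORY << DEMO_SHIFT));
            assert(MASK_BIT(0)  == 0);              /* flag clear */
            return 0;
    }

The kernel version obtains the destination position from ilog2(), which folds to a constant because every BLK_TC_* mask is a single bit.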
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4b1122d0df37..ba14a22be4cc 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -101,10 +101,7 @@ static inline void ftrace_enable_cpu(void)
 	preempt_enable();
 }
 
-static cpumask_var_t __read_mostly tracing_buffer_mask;
-
-#define for_each_tracing_cpu(cpu)	\
-	for_each_cpu(cpu, tracing_buffer_mask)
+cpumask_var_t __read_mostly tracing_buffer_mask;
 
 /*
  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
@@ -744,13 +741,6 @@ __acquires(kernel_lock)
 		return -1;
 	}
 
-	/*
-	 * When this gets called we hold the BKL which means that
-	 * preemption is disabled. Various trace selftests however
-	 * need to disable and enable preemption for successful tests.
-	 * So we drop the BKL here and grab it after the tests again.
-	 */
-	unlock_kernel();
 	mutex_lock(&trace_types_lock);
 
 	tracing_selftest_running = true;
@@ -832,7 +822,6 @@ __acquires(kernel_lock)
 #endif
 
  out_unlock:
-	lock_kernel();
 	return ret;
 }
 
@@ -1493,11 +1482,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 }
 EXPORT_SYMBOL_GPL(trace_vprintk);
 
-enum trace_file_type {
-	TRACE_FILE_LAT_FMT = 1,
-	TRACE_FILE_ANNOTATE = 2,
-};
-
 static void trace_iterator_increment(struct trace_iterator *iter)
 {
 	/* Don't allow ftrace to trace into the ring buffers */
@@ -1595,7 +1579,7 @@ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 }
 
 /* Find the next real entry, and increment the iterator to the next entry */
-static void *find_next_entry_inc(struct trace_iterator *iter)
+void *trace_find_next_entry_inc(struct trace_iterator *iter)
 {
 	iter->ent = __find_next_entry(iter, &iter->cpu,
 				      &iter->lost_events, &iter->ts);
@@ -1630,19 +1614,19 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 		return NULL;
 
 	if (iter->idx < 0)
-		ent = find_next_entry_inc(iter);
+		ent = trace_find_next_entry_inc(iter);
 	else
 		ent = iter;
 
 	while (ent && iter->idx < i)
-		ent = find_next_entry_inc(iter);
+		ent = trace_find_next_entry_inc(iter);
 
 	iter->pos = *pos;
 
 	return ent;
 }
 
-static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
+void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 {
 	struct trace_array *tr = iter->tr;
 	struct ring_buffer_event *event;
@@ -2003,7 +1987,7 @@ int trace_empty(struct trace_iterator *iter)
 }
 
 /* Called with trace_event_read_lock() held. */
-static enum print_line_t print_trace_line(struct trace_iterator *iter)
+enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
 	enum print_line_t ret;
 
@@ -3193,7 +3177,7 @@ waitagain:
 
 	trace_event_read_lock();
 	trace_access_lock(iter->cpu_file);
-	while (find_next_entry_inc(iter) != NULL) {
+	while (trace_find_next_entry_inc(iter) != NULL) {
 		enum print_line_t ret;
 		int len = iter->seq.len;
 
@@ -3276,7 +3260,7 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
 		if (ret != TRACE_TYPE_NO_CONSUME)
 			trace_consume(iter);
 		rem -= count;
-		if (!find_next_entry_inc(iter)) {
+		if (!trace_find_next_entry_inc(iter)) {
 			rem = 0;
 			iter->ent = NULL;
 			break;
@@ -3332,7 +3316,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	if (ret <= 0)
 		goto out_err;
 
-	if (!iter->ent && !find_next_entry_inc(iter)) {
+	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
 		ret = -EFAULT;
 		goto out_err;
 	}
@@ -4402,7 +4386,7 @@ static struct notifier_block trace_die_notifier = {
  */
 #define KERN_TRACE		KERN_EMERG
 
-static void
+void
 trace_printk_seq(struct trace_seq *s)
 {
 	/* Probably should print a warning here. */
@@ -4417,6 +4401,13 @@ trace_printk_seq(struct trace_seq *s)
 	trace_seq_init(s);
 }
 
+void trace_init_global_iter(struct trace_iterator *iter)
+{
+	iter->tr = &global_trace;
+	iter->trace = current_trace;
+	iter->cpu_file = TRACE_PIPE_ALL_CPU;
+}
+
 static void
 __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 {
@@ -4442,8 +4433,10 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	if (disable_tracing)
 		ftrace_kill();
 
+	trace_init_global_iter(&iter);
+
 	for_each_tracing_cpu(cpu) {
-		atomic_inc(&global_trace.data[cpu]->disabled);
+		atomic_inc(&iter.tr->data[cpu]->disabled);
 	}
 
 	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
@@ -4492,7 +4485,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	iter.iter_flags |= TRACE_FILE_LAT_FMT;
 	iter.pos = -1;
 
-	if (find_next_entry_inc(&iter) != NULL) {
+	if (trace_find_next_entry_inc(&iter) != NULL) {
 		int ret;
 
 		ret = print_trace_line(&iter);
@@ -4514,7 +4507,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	trace_flags |= old_userobj;
 
 	for_each_tracing_cpu(cpu) {
-		atomic_dec(&global_trace.data[cpu]->disabled);
+		atomic_dec(&iter.tr->data[cpu]->disabled);
 	}
 	tracing_on();
 }
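
Note: the symbols un-static'd above — trace_find_next_entry_inc(), tracing_iter_reset(), print_trace_line(), trace_printk_seq(), plus the new trace_init_global_iter() — add up to a small read-side API for walking the global ring buffer from outside trace.c. A condensed, hypothetical sketch of the consumption loop they enable (the per-cpu ring_buffer_read_prepare()/ring_buffer_read_start() setup a real caller needs is omitted; trace_kdb.c below is the complete version):

    /* Hypothetical consumer -- see trace_kdb.c for the real thing. */
    static void dump_all_entries(void)
    {
    	static struct trace_iterator iter;	/* too big for the stack */
    
    	trace_init_global_iter(&iter);		/* bind to global_trace */
    	iter.pos = -1;
    
    	while (trace_find_next_entry_inc(&iter)) {
    		print_trace_line(&iter);	/* format into iter.seq */
    		trace_printk_seq(&iter.seq);	/* emit and reset seq   */
    	}
    }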
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d05c873dd4b2..d39b3c5454a5 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -314,6 +314,14 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts);
 
+int trace_empty(struct trace_iterator *iter);
+
+void *trace_find_next_entry_inc(struct trace_iterator *iter);
+
+void trace_init_global_iter(struct trace_iterator *iter);
+
+void tracing_iter_reset(struct trace_iterator *iter, int cpu);
+
 void default_wait_pipe(struct trace_iterator *iter);
 void poll_wait_pipe(struct trace_iterator *iter);
 
@@ -351,6 +359,15 @@ void tracing_start_sched_switch_record(void);
 int register_tracer(struct tracer *type);
 void unregister_tracer(struct tracer *type);
 int is_tracing_stopped(void);
+enum trace_file_type {
+	TRACE_FILE_LAT_FMT = 1,
+	TRACE_FILE_ANNOTATE = 2,
+};
+
+extern cpumask_var_t __read_mostly tracing_buffer_mask;
+
+#define for_each_tracing_cpu(cpu)	\
+	for_each_cpu(cpu, tracing_buffer_mask)
 
 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
 
@@ -436,6 +453,8 @@ trace_array_vprintk(struct trace_array *tr,
 		    unsigned long ip, const char *fmt, va_list args);
 int trace_array_printk(struct trace_array *tr,
 		       unsigned long ip, const char *fmt, ...);
+void trace_printk_seq(struct trace_seq *s);
+enum print_line_t print_trace_line(struct trace_iterator *iter);
 
 extern unsigned long trace_flags;
 
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 52fda6c04ac3..685a67d55db0 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -55,7 +55,7 @@ u64 notrace trace_clock_local(void)
  */
 u64 notrace trace_clock(void)
 {
-	return cpu_clock(raw_smp_processor_id());
+	return local_clock();
 }
 
 
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
new file mode 100644
index 000000000000..7b8ecd751d93
--- /dev/null
+++ b/kernel/trace/trace_kdb.c
@@ -0,0 +1,136 @@
+/*
+ * kdb helper for dumping the ftrace buffer
+ *
+ * Copyright (C) 2010 Jason Wessel <jason.wessel@windriver.com>
+ *
+ * ftrace_dump_buf based on ftrace_dump:
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
+ *
+ */
+#include <linux/init.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/ftrace.h>
+
+#include "../debug/kdb/kdb_private.h"
+#include "trace.h"
+#include "trace_output.h"
+
+static void ftrace_dump_buf(int skip_lines, long cpu_file)
+{
+	/* use static because iter can be a bit big for the stack */
+	static struct trace_iterator iter;
+	unsigned int old_userobj;
+	int cnt = 0, cpu;
+
+	trace_init_global_iter(&iter);
+
+	for_each_tracing_cpu(cpu) {
+		atomic_inc(&iter.tr->data[cpu]->disabled);
+	}
+
+	old_userobj = trace_flags;
+
+	/* don't look at user memory in panic mode */
+	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
+
+	kdb_printf("Dumping ftrace buffer:\n");
+
+	/* reset all but tr, trace, and overruns */
+	memset(&iter.seq, 0,
+		   sizeof(struct trace_iterator) -
+		   offsetof(struct trace_iterator, seq));
+	iter.iter_flags |= TRACE_FILE_LAT_FMT;
+	iter.pos = -1;
+
+	if (cpu_file == TRACE_PIPE_ALL_CPU) {
+		for_each_tracing_cpu(cpu) {
+			iter.buffer_iter[cpu] =
+			ring_buffer_read_prepare(iter.tr->buffer, cpu);
+			ring_buffer_read_start(iter.buffer_iter[cpu]);
+			tracing_iter_reset(&iter, cpu);
+		}
+	} else {
+		iter.cpu_file = cpu_file;
+		iter.buffer_iter[cpu_file] =
+			ring_buffer_read_prepare(iter.tr->buffer, cpu_file);
+		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
+		tracing_iter_reset(&iter, cpu_file);
+	}
+	if (!trace_empty(&iter))
+		trace_find_next_entry_inc(&iter);
+	while (!trace_empty(&iter)) {
+		if (!cnt)
+			kdb_printf("---------------------------------\n");
+		cnt++;
+
+		if (trace_find_next_entry_inc(&iter) != NULL && !skip_lines)
+			print_trace_line(&iter);
+		if (!skip_lines)
+			trace_printk_seq(&iter.seq);
+		else
+			skip_lines--;
+		if (KDB_FLAG(CMD_INTERRUPT))
+			goto out;
+	}
+
+	if (!cnt)
+		kdb_printf(" (ftrace buffer empty)\n");
+	else
+		kdb_printf("---------------------------------\n");
+
+out:
+	trace_flags = old_userobj;
+
+	for_each_tracing_cpu(cpu) {
+		atomic_dec(&iter.tr->data[cpu]->disabled);
+	}
+
+	for_each_tracing_cpu(cpu)
+		if (iter.buffer_iter[cpu])
+			ring_buffer_read_finish(iter.buffer_iter[cpu]);
+}
+
+/*
+ * kdb_ftdump - Dump the ftrace log buffer
+ */
+static int kdb_ftdump(int argc, const char **argv)
+{
+	int skip_lines = 0;
+	long cpu_file;
+	char *cp;
+
+	if (argc > 2)
+		return KDB_ARGCOUNT;
+
+	if (argc) {
+		skip_lines = simple_strtol(argv[1], &cp, 0);
+		if (*cp)
+			skip_lines = 0;
+	}
+
+	if (argc == 2) {
+		cpu_file = simple_strtol(argv[2], &cp, 0);
+		if (*cp || cpu_file >= NR_CPUS || cpu_file < 0 ||
+		    !cpu_online(cpu_file))
+			return KDB_BADINT;
+	} else {
+		cpu_file = TRACE_PIPE_ALL_CPU;
+	}
+
+	kdb_trap_printk++;
+	ftrace_dump_buf(skip_lines, cpu_file);
+	kdb_trap_printk--;
+
+	return 0;
+}
+
+static __init int kdb_ftrace_register(void)
+{
+	kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
+			    "Dump ftrace log", 0, KDB_REPEAT_NONE);
+	return 0;
+}
+
+late_initcall(kdb_ftrace_register);
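
With this file built in, kdb gains an ftdump command whose two optional arguments are a count of output lines to skip and a single CPU to dump. A hypothetical session against a multi-CPU target (the annotations are not part of the command syntax):

    kdb> ftdump          dump every cpu's buffer
    kdb> ftdump 25       same, but skip the first 25 lines of output
    kdb> ftdump 0 2      skip nothing, dump only cpu 2's buffer

Per kdb_ftdump()'s parsing, a malformed skip count silently falls back to 0, while a CPU argument that is out of range or names an offline cpu returns KDB_BADINT.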