Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	453
1 file changed, 295 insertions, 158 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ed7b5d1e12f4..48ef4960ec90 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -87,18 +87,6 @@ static int tracing_disabled = 1;
 
 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
-static inline void ftrace_disable_cpu(void)
-{
-	preempt_disable();
-	__this_cpu_inc(ftrace_cpu_disabled);
-}
-
-static inline void ftrace_enable_cpu(void)
-{
-	__this_cpu_dec(ftrace_cpu_disabled);
-	preempt_enable();
-}
-
 cpumask_var_t __read_mostly tracing_buffer_mask;
 
 /*
@@ -629,7 +617,6 @@ ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 {
 	int len;
-	void *ret;
 
 	if (s->len <= s->readpos)
 		return -EBUSY;
@@ -637,9 +624,7 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 	len = s->len - s->readpos;
 	if (cnt > len)
 		cnt = len;
-	ret = memcpy(buf, s->buffer + s->readpos, cnt);
-	if (!ret)
-		return -EFAULT;
+	memcpy(buf, s->buffer + s->readpos, cnt);
 
 	s->readpos += cnt;
 	return cnt;
@@ -751,8 +736,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	arch_spin_lock(&ftrace_max_lock);
 
-	ftrace_disable_cpu();
-
 	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
 
 	if (ret == -EBUSY) {
@@ -766,8 +749,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		 "Failed to swap buffers due to commit in progress\n");
 	}
 
-	ftrace_enable_cpu();
-
 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
@@ -841,7 +822,8 @@ __acquires(kernel_lock)
 
 		/* If we expanded the buffers, make sure the max is expanded too */
 		if (ring_buffer_expanded && type->use_max_tr)
-			ring_buffer_resize(max_tr.buffer, trace_buf_size);
+			ring_buffer_resize(max_tr.buffer, trace_buf_size,
+						RING_BUFFER_ALL_CPUS);
 
 		/* the test is responsible for initializing and enabling */
 		pr_info("Testing tracer %s: ", type->name);
@@ -857,7 +839,8 @@ __acquires(kernel_lock)
 
 		/* Shrink the max buffer again */
 		if (ring_buffer_expanded && type->use_max_tr)
-			ring_buffer_resize(max_tr.buffer, 1);
+			ring_buffer_resize(max_tr.buffer, 1,
+						RING_BUFFER_ALL_CPUS);
 
 		printk(KERN_CONT "PASSED\n");
 	}
@@ -917,13 +900,6 @@ out:
 	mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct ring_buffer *buffer, int cpu)
-{
-	ftrace_disable_cpu();
-	ring_buffer_reset_cpu(buffer, cpu);
-	ftrace_enable_cpu();
-}
-
 void tracing_reset(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer *buffer = tr->buffer;
@@ -932,7 +908,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
 
 	/* Make sure all commits have finished */
 	synchronize_sched();
-	__tracing_reset(buffer, cpu);
+	ring_buffer_reset_cpu(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -950,7 +926,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		__tracing_reset(buffer, cpu);
+		ring_buffer_reset_cpu(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -1498,25 +1474,119 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)
 
 #endif /* CONFIG_STACKTRACE */
 
+/* created for use with alloc_percpu */
+struct trace_buffer_struct {
+	char buffer[TRACE_BUF_SIZE];
+};
+
+static struct trace_buffer_struct *trace_percpu_buffer;
+static struct trace_buffer_struct *trace_percpu_sirq_buffer;
+static struct trace_buffer_struct *trace_percpu_irq_buffer;
+static struct trace_buffer_struct *trace_percpu_nmi_buffer;
+
+/*
+ * The buffer used is dependent on the context. There is a per cpu
+ * buffer for normal context, softirq context, hard irq context and
+ * for NMI context. This allows for lockless recording.
+ *
+ * Note, if the buffers failed to be allocated, then this returns NULL
+ */
+static char *get_trace_buf(void)
+{
+	struct trace_buffer_struct *percpu_buffer;
+	struct trace_buffer_struct *buffer;
+
+	/*
+	 * If we have allocated per cpu buffers, then we do not
+	 * need to do any locking.
+	 */
+	if (in_nmi())
+		percpu_buffer = trace_percpu_nmi_buffer;
+	else if (in_irq())
+		percpu_buffer = trace_percpu_irq_buffer;
+	else if (in_softirq())
+		percpu_buffer = trace_percpu_sirq_buffer;
+	else
+		percpu_buffer = trace_percpu_buffer;
+
+	if (!percpu_buffer)
+		return NULL;
+
+	buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());
+
+	return buffer->buffer;
+}
+
+static int alloc_percpu_trace_buffer(void)
+{
+	struct trace_buffer_struct *buffers;
+	struct trace_buffer_struct *sirq_buffers;
+	struct trace_buffer_struct *irq_buffers;
+	struct trace_buffer_struct *nmi_buffers;
+
+	buffers = alloc_percpu(struct trace_buffer_struct);
+	if (!buffers)
+		goto err_warn;
+
+	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
+	if (!sirq_buffers)
+		goto err_sirq;
+
+	irq_buffers = alloc_percpu(struct trace_buffer_struct);
+	if (!irq_buffers)
+		goto err_irq;
+
+	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
+	if (!nmi_buffers)
+		goto err_nmi;
+
+	trace_percpu_buffer = buffers;
+	trace_percpu_sirq_buffer = sirq_buffers;
+	trace_percpu_irq_buffer = irq_buffers;
+	trace_percpu_nmi_buffer = nmi_buffers;
+
+	return 0;
+
+err_nmi:
+	free_percpu(irq_buffers);
+err_irq:
+	free_percpu(sirq_buffers);
+err_sirq:
+	free_percpu(buffers);
+err_warn:
+	WARN(1, "Could not allocate percpu trace_printk buffer");
+	return -ENOMEM;
+}
+
+void trace_printk_init_buffers(void)
+{
+	static int buffers_allocated;
+
+	if (buffers_allocated)
+		return;
+
+	if (alloc_percpu_trace_buffer())
+		return;
+
+	pr_info("ftrace: Allocated trace_printk buffers\n");
+
+	buffers_allocated = 1;
+}
+
 /**
  * trace_vbprintk - write binary msg to tracing buffer
  *
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	static arch_spinlock_t trace_buf_lock =
-		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
-	static u32 trace_buf[TRACE_BUF_SIZE];
-
 	struct ftrace_event_call *call = &event_bprint;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
 	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
 	struct bprint_entry *entry;
 	unsigned long flags;
-	int disable;
-	int cpu, len = 0, size, pc;
+	char *tbuffer;
+	int len = 0, size, pc;
 
 	if (unlikely(tracing_selftest_running || tracing_disabled))
 		return 0;
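
get_trace_buf() is lockless by construction: there is one buffer per (cpu, context) pair, so a trace_printk() in normal context that is interrupted by an irq doing its own trace_printk() cannot have its buffer scribbled over, and preemption is disabled around the use so the cpu cannot change underneath it. A minimal userspace sketch of the same pattern, with thread-local storage standing in for percpu data (all names here are illustrative, not kernel API):

	#include <stdio.h>

	#define NCTX  4		/* normal, softirq, irq, nmi */
	#define BUFSZ 1024

	/* one buffer per (thread, context) pair */
	static _Thread_local char ctx_buf[NCTX][BUFSZ];
	static _Thread_local int cur_ctx;	/* bumped on "interrupt" entry */

	static char *get_buf(void)
	{
		return ctx_buf[cur_ctx];
	}

	int main(void)
	{
		snprintf(get_buf(), BUFSZ, "normal-context message");
		cur_ctx = 1;			/* pretend an irq arrived */
		snprintf(get_buf(), BUFSZ, "irq-context message");
		cur_ctx = 0;			/* back to normal context */
		puts(get_buf());		/* first message is intact */
		return 0;
	}
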
@@ -1526,43 +1596,36 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	pc = preempt_count();
 	preempt_disable_notrace();
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
 
-	disable = atomic_inc_return(&data->disabled);
-	if (unlikely(disable != 1))
+	tbuffer = get_trace_buf();
+	if (!tbuffer) {
+		len = 0;
 		goto out;
+	}
 
-	/* Lockdep uses trace_printk for lock tracing */
-	local_irq_save(flags);
-	arch_spin_lock(&trace_buf_lock);
-	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
 
-	if (len > TRACE_BUF_SIZE || len < 0)
-		goto out_unlock;
+	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
+		goto out;
 
+	local_save_flags(flags);
 	size = sizeof(*entry) + sizeof(u32) * len;
 	buffer = tr->buffer;
 	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
 					  flags, pc);
 	if (!event)
-		goto out_unlock;
+		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
 	entry->fmt = fmt;
 
-	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
+	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
 	if (!filter_check_discard(call, entry, buffer, event)) {
 		ring_buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
 
-out_unlock:
-	arch_spin_unlock(&trace_buf_lock);
-	local_irq_restore(flags);
-
 out:
-	atomic_dec_return(&data->disabled);
 	preempt_enable_notrace();
 	unpause_graph_tracing();
 
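
Note the unit change above: vbin_printf() takes its size and returns its length in u32 words, not bytes, which is why the capacity and the overflow check both use TRACE_BUF_SIZE/sizeof(int) while the memcpy() copies sizeof(u32) * len bytes. A quick standalone illustration of that accounting (the buffer size is an example value, not the kernel's):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const size_t buf_bytes = 1024;		/* example size */
		const size_t cap_words = buf_bytes / sizeof(uint32_t);
		const size_t len = 100;			/* words produced */

		printf("capacity: %zu words\n", cap_words);		/* 256 */
		printf("copied:   %zu bytes\n", len * sizeof(uint32_t)); /* 400 */
		return 0;
	}
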
@@ -1588,58 +1651,53 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-	static char trace_buf[TRACE_BUF_SIZE];
-
 	struct ftrace_event_call *call = &event_print;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
-	struct trace_array_cpu *data;
-	int cpu, len = 0, size, pc;
+	int len = 0, size, pc;
 	struct print_entry *entry;
-	unsigned long irq_flags;
-	int disable;
+	unsigned long flags;
+	char *tbuffer;
 
 	if (tracing_disabled || tracing_selftest_running)
 		return 0;
 
+	/* Don't pollute graph traces with trace_vprintk internals */
+	pause_graph_tracing();
+
 	pc = preempt_count();
 	preempt_disable_notrace();
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
 
-	disable = atomic_inc_return(&data->disabled);
-	if (unlikely(disable != 1))
+
+	tbuffer = get_trace_buf();
+	if (!tbuffer) {
+		len = 0;
 		goto out;
+	}
 
-	pause_graph_tracing();
-	raw_local_irq_save(irq_flags);
-	arch_spin_lock(&trace_buf_lock);
-	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
+	if (len > TRACE_BUF_SIZE)
+		goto out;
 
+	local_save_flags(flags);
 	size = sizeof(*entry) + len + 1;
 	buffer = tr->buffer;
 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-					  irq_flags, pc);
+					  flags, pc);
 	if (!event)
-		goto out_unlock;
+		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
 
-	memcpy(&entry->buf, trace_buf, len);
+	memcpy(&entry->buf, tbuffer, len);
 	entry->buf[len] = '\0';
 	if (!filter_check_discard(call, entry, buffer, event)) {
 		ring_buffer_unlock_commit(buffer, event);
-		ftrace_trace_stack(buffer, irq_flags, 6, pc);
+		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
-
-out_unlock:
-	arch_spin_unlock(&trace_buf_lock);
-	raw_local_irq_restore(irq_flags);
-	unpause_graph_tracing();
 out:
-	atomic_dec_return(&data->disabled);
 	preempt_enable_notrace();
+	unpause_graph_tracing();
 
 	return len;
 }
@@ -1652,14 +1710,9 @@ EXPORT_SYMBOL_GPL(trace_vprintk);
 
 static void trace_iterator_increment(struct trace_iterator *iter)
 {
-	/* Don't allow ftrace to trace into the ring buffers */
-	ftrace_disable_cpu();
-
 	iter->idx++;
 	if (iter->buffer_iter[iter->cpu])
 		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
-
-	ftrace_enable_cpu();
 }
 
 static struct trace_entry *
@@ -1669,17 +1722,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
 	struct ring_buffer_event *event;
 	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
 
-	/* Don't allow ftrace to trace into the ring buffers */
-	ftrace_disable_cpu();
-
 	if (buf_iter)
 		event = ring_buffer_iter_peek(buf_iter, ts);
 	else
 		event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
 					 lost_events);
 
-	ftrace_enable_cpu();
-
 	if (event) {
 		iter->ent_size = ring_buffer_event_length(event);
 		return ring_buffer_event_data(event);
@@ -1769,11 +1817,8 @@ void *trace_find_next_entry_inc(struct trace_iterator *iter)
 
 static void trace_consume(struct trace_iterator *iter)
 {
-	/* Don't allow ftrace to trace into the ring buffers */
-	ftrace_disable_cpu();
 	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
 			    &iter->lost_events);
-	ftrace_enable_cpu();
 }
 
 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
@@ -1862,16 +1907,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 		iter->cpu = 0;
 		iter->idx = -1;
 
-		ftrace_disable_cpu();
-
 		if (cpu_file == TRACE_PIPE_ALL_CPU) {
 			for_each_tracing_cpu(cpu)
 				tracing_iter_reset(iter, cpu);
 		} else
 			tracing_iter_reset(iter, cpu_file);
 
-		ftrace_enable_cpu();
-
 		iter->leftover = 0;
 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
 			;
@@ -2332,15 +2373,13 @@ static struct trace_iterator *
 __tracing_open(struct inode *inode, struct file *file)
 {
 	long cpu_file = (long) inode->i_private;
-	void *fail_ret = ERR_PTR(-ENOMEM);
 	struct trace_iterator *iter;
-	struct seq_file *m;
-	int cpu, ret;
+	int cpu;
 
 	if (tracing_disabled)
 		return ERR_PTR(-ENODEV);
 
-	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
 	if (!iter)
 		return ERR_PTR(-ENOMEM);
 
@@ -2397,32 +2436,15 @@ __tracing_open(struct inode *inode, struct file *file)
 		tracing_iter_reset(iter, cpu);
 	}
 
-	ret = seq_open(file, &tracer_seq_ops);
-	if (ret < 0) {
-		fail_ret = ERR_PTR(ret);
-		goto fail_buffer;
-	}
-
-	m = file->private_data;
-	m->private = iter;
-
 	mutex_unlock(&trace_types_lock);
 
 	return iter;
 
-fail_buffer:
-	for_each_tracing_cpu(cpu) {
-		if (iter->buffer_iter[cpu])
-			ring_buffer_read_finish(iter->buffer_iter[cpu]);
-	}
-	free_cpumask_var(iter->started);
-	tracing_start();
 fail:
 	mutex_unlock(&trace_types_lock);
 	kfree(iter->trace);
-	kfree(iter);
-
-	return fail_ret;
+	seq_release_private(inode, file);
+	return ERR_PTR(-ENOMEM);
 }
 
 int tracing_open_generic(struct inode *inode, struct file *filp)
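
The conversion above replaces the open-coded kzalloc() + seq_open() + m->private wiring with __seq_open_private(), which allocates a zeroed private struct, opens the seq_file, and links the two in one call; seq_release_private() then frees both on the error path here and in tracing_release() below. A sketch of the pairing as typically used (my_iter, my_seq_ops, and my_open are placeholders, not trace.c code):

	#include <linux/fs.h>
	#include <linux/seq_file.h>

	struct my_iter {
		loff_t pos;
	};

	extern const struct seq_operations my_seq_ops;	/* start/next/stop/show */

	static int my_open(struct inode *inode, struct file *file)
	{
		struct my_iter *iter;

		/* zeroed allocation, wired to the seq_file's ->private */
		iter = __seq_open_private(file, &my_seq_ops, sizeof(*iter));
		if (!iter)
			return -ENOMEM;
		return 0;
	}

	static const struct file_operations my_fops = {
		.open		= my_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= seq_release_private,	/* frees ->private too */
	};
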
@@ -2458,11 +2480,10 @@ static int tracing_release(struct inode *inode, struct file *file)
 	tracing_start();
 	mutex_unlock(&trace_types_lock);
 
-	seq_release(inode, file);
 	mutex_destroy(&iter->mutex);
 	free_cpumask_var(iter->started);
 	kfree(iter->trace);
-	kfree(iter);
+	seq_release_private(inode, file);
 	return 0;
 }
 
@@ -2974,7 +2995,14 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
 	return t->init(tr);
 }
 
-static int __tracing_resize_ring_buffer(unsigned long size)
+static void set_buffer_entries(struct trace_array *tr, unsigned long val)
+{
+	int cpu;
+	for_each_tracing_cpu(cpu)
+		tr->data[cpu]->entries = val;
+}
+
+static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 {
 	int ret;
 
@@ -2985,19 +3013,32 @@ static int __tracing_resize_ring_buffer(unsigned long size)
 	 */
 	ring_buffer_expanded = 1;
 
-	ret = ring_buffer_resize(global_trace.buffer, size);
+	ret = ring_buffer_resize(global_trace.buffer, size, cpu);
 	if (ret < 0)
 		return ret;
 
 	if (!current_trace->use_max_tr)
 		goto out;
 
-	ret = ring_buffer_resize(max_tr.buffer, size);
+	ret = ring_buffer_resize(max_tr.buffer, size, cpu);
 	if (ret < 0) {
-		int r;
+		int r = 0;
+
+		if (cpu == RING_BUFFER_ALL_CPUS) {
+			int i;
+			for_each_tracing_cpu(i) {
+				r = ring_buffer_resize(global_trace.buffer,
+						global_trace.data[i]->entries,
+						i);
+				if (r < 0)
+					break;
+			}
+		} else {
+			r = ring_buffer_resize(global_trace.buffer,
+					global_trace.data[cpu]->entries,
+					cpu);
+		}
 
-		r = ring_buffer_resize(global_trace.buffer,
-				       global_trace.entries);
 		if (r < 0) {
 			/*
 			 * AARGH! We are left with different
@@ -3019,14 +3060,21 @@ static int __tracing_resize_ring_buffer(unsigned long size)
 		return ret;
 	}
 
-	max_tr.entries = size;
+	if (cpu == RING_BUFFER_ALL_CPUS)
+		set_buffer_entries(&max_tr, size);
+	else
+		max_tr.data[cpu]->entries = size;
+
 out:
-	global_trace.entries = size;
+	if (cpu == RING_BUFFER_ALL_CPUS)
+		set_buffer_entries(&global_trace, size);
+	else
+		global_trace.data[cpu]->entries = size;
 
 	return ret;
 }
 
-static ssize_t tracing_resize_ring_buffer(unsigned long size)
+static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
 {
 	int cpu, ret = size;
 
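
When resizing max_tr fails, the error path above walks global_trace back to each cpu's previous entries count, so the two buffers never end up with mismatched per-cpu sizes (the remaining WARN_ON covers the case where even that restore fails). The shape of that recovery, reduced to a standalone sketch in which resize_one() stands in for ring_buffer_resize() on a single cpu:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	#define NR_UNITS 8

	static unsigned long cur_size[NR_UNITS];

	/* stand-in for a per-cpu resize; rigged to fail for unit 5 */
	static bool resize_one(size_t i, unsigned long size)
	{
		if (size > 100 && i == 5)
			return false;
		cur_size[i] = size;
		return true;
	}

	static bool resize_all(unsigned long size)
	{
		unsigned long old[NR_UNITS];
		size_t i;

		for (i = 0; i < NR_UNITS; i++) {
			old[i] = cur_size[i];
			if (!resize_one(i, size)) {
				/* roll back the units already grown */
				while (i--)
					resize_one(i, old[i]);
				return false;
			}
		}
		return true;
	}

	int main(void)
	{
		resize_all(64);
		printf("resize_all(200) %s, unit 0 is still %lu\n",
		       resize_all(200) ? "ok" : "failed", cur_size[0]);
		return 0;
	}
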
@@ -3042,12 +3090,19 @@ static ssize_t tracing_resize_ring_buffer(unsigned long size)
 			atomic_inc(&max_tr.data[cpu]->disabled);
 	}
 
-	if (size != global_trace.entries)
-		ret = __tracing_resize_ring_buffer(size);
+	if (cpu_id != RING_BUFFER_ALL_CPUS) {
+		/* make sure this cpu is enabled in the mask */
+		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
 
+	ret = __tracing_resize_ring_buffer(size, cpu_id);
 	if (ret < 0)
 		ret = -ENOMEM;
 
+out:
 	for_each_tracing_cpu(cpu) {
 		if (global_trace.data[cpu])
 			atomic_dec(&global_trace.data[cpu]->disabled);
@@ -3078,7 +3133,8 @@ int tracing_update_buffers(void)
 
 	mutex_lock(&trace_types_lock);
 	if (!ring_buffer_expanded)
-		ret = __tracing_resize_ring_buffer(trace_buf_size);
+		ret = __tracing_resize_ring_buffer(trace_buf_size,
+						RING_BUFFER_ALL_CPUS);
 	mutex_unlock(&trace_types_lock);
 
 	return ret;
@@ -3102,7 +3158,8 @@ static int tracing_set_tracer(const char *buf)
 	mutex_lock(&trace_types_lock);
 
 	if (!ring_buffer_expanded) {
-		ret = __tracing_resize_ring_buffer(trace_buf_size);
+		ret = __tracing_resize_ring_buffer(trace_buf_size,
+						RING_BUFFER_ALL_CPUS);
 		if (ret < 0)
 			goto out;
 		ret = 0;
@@ -3128,8 +3185,8 @@ static int tracing_set_tracer(const char *buf)
 		 * The max_tr ring buffer has some state (e.g. ring->clock) and
 		 * we want to preserve it.
 		 */
-		ring_buffer_resize(max_tr.buffer, 1);
-		max_tr.entries = 1;
+		ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
+		set_buffer_entries(&max_tr, 1);
 	}
 	destroy_trace_option_files(topts);
 
@@ -3137,10 +3194,17 @@ static int tracing_set_tracer(const char *buf)
 
 	topts = create_trace_option_files(current_trace);
 	if (current_trace->use_max_tr) {
-		ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
-		if (ret < 0)
-			goto out;
-		max_tr.entries = global_trace.entries;
+		int cpu;
+		/* we need to make per cpu buffer sizes equivalent */
+		for_each_tracing_cpu(cpu) {
+			ret = ring_buffer_resize(max_tr.buffer,
+						global_trace.data[cpu]->entries,
+						cpu);
+			if (ret < 0)
+				goto out;
+			max_tr.data[cpu]->entries =
+					global_trace.data[cpu]->entries;
+		}
 	}
 
 	if (t->init) {
@@ -3642,30 +3706,82 @@ out_err:
 	goto out;
 }
 
+struct ftrace_entries_info {
+	struct trace_array	*tr;
+	int			cpu;
+};
+
+static int tracing_entries_open(struct inode *inode, struct file *filp)
+{
+	struct ftrace_entries_info *info;
+
+	if (tracing_disabled)
+		return -ENODEV;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->tr = &global_trace;
+	info->cpu = (unsigned long)inode->i_private;
+
+	filp->private_data = info;
+
+	return 0;
+}
+
 static ssize_t
 tracing_entries_read(struct file *filp, char __user *ubuf,
 		     size_t cnt, loff_t *ppos)
 {
-	struct trace_array *tr = filp->private_data;
-	char buf[96];
-	int r;
+	struct ftrace_entries_info *info = filp->private_data;
+	struct trace_array *tr = info->tr;
+	char buf[64];
+	int r = 0;
+	ssize_t ret;
 
 	mutex_lock(&trace_types_lock);
-	if (!ring_buffer_expanded)
-		r = sprintf(buf, "%lu (expanded: %lu)\n",
-			    tr->entries >> 10,
-			    trace_buf_size >> 10);
-	else
-		r = sprintf(buf, "%lu\n", tr->entries >> 10);
+
+	if (info->cpu == RING_BUFFER_ALL_CPUS) {
+		int cpu, buf_size_same;
+		unsigned long size;
+
+		size = 0;
+		buf_size_same = 1;
+		/* check if all cpu sizes are same */
+		for_each_tracing_cpu(cpu) {
+			/* fill in the size from first enabled cpu */
+			if (size == 0)
+				size = tr->data[cpu]->entries;
+			if (size != tr->data[cpu]->entries) {
+				buf_size_same = 0;
+				break;
+			}
+		}
+
+		if (buf_size_same) {
+			if (!ring_buffer_expanded)
+				r = sprintf(buf, "%lu (expanded: %lu)\n",
+					    size >> 10,
+					    trace_buf_size >> 10);
+			else
+				r = sprintf(buf, "%lu\n", size >> 10);
+		} else
+			r = sprintf(buf, "X\n");
+	} else
+		r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10);
+
 	mutex_unlock(&trace_types_lock);
 
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+	return ret;
 }
 
 static ssize_t
 tracing_entries_write(struct file *filp, const char __user *ubuf,
 		      size_t cnt, loff_t *ppos)
 {
+	struct ftrace_entries_info *info = filp->private_data;
 	unsigned long val;
 	int ret;
 
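
With per-cpu resizing, the top-level buffer_size_kb can no longer always report a single number: the read handler above scans every cpu and prints "X" when the sizes disagree, leaving the per-cpu files as the only exact view. The comparison logic, condensed into a runnable sketch (the sizes are made up):

	#include <stdio.h>

	#define NR_CPUS 4

	/* per-cpu buffer sizes in bytes; cpu2 was resized separately */
	static unsigned long entries[NR_CPUS] = {
		1408UL << 10, 1408UL << 10, 4096UL << 10, 1408UL << 10,
	};

	int main(void)
	{
		unsigned long size = 0;
		int cpu, same = 1;

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (size == 0)
				size = entries[cpu];	/* first cpu's size */
			if (entries[cpu] != size) {
				same = 0;
				break;
			}
		}
		if (same)
			printf("%lu\n", size >> 10);	/* one size, in KB */
		else
			printf("X\n");			/* mixed sizes */
		return 0;
	}
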
@@ -3680,7 +3796,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	/* value is in KB */
 	val <<= 10;
 
-	ret = tracing_resize_ring_buffer(val);
+	ret = tracing_resize_ring_buffer(val, info->cpu);
 	if (ret < 0)
 		return ret;
 
@@ -3689,6 +3805,16 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int
+tracing_entries_release(struct inode *inode, struct file *filp)
+{
+	struct ftrace_entries_info *info = filp->private_data;
+
+	kfree(info);
+
+	return 0;
+}
+
 static ssize_t
 tracing_total_entries_read(struct file *filp, char __user *ubuf,
 			   size_t cnt, loff_t *ppos)
@@ -3700,7 +3826,7 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
 
 	mutex_lock(&trace_types_lock);
 	for_each_tracing_cpu(cpu) {
-		size += tr->entries >> 10;
+		size += tr->data[cpu]->entries >> 10;
 		if (!ring_buffer_expanded)
 			expanded_size += trace_buf_size >> 10;
 	}
@@ -3734,7 +3860,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
 	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
 		tracing_off();
 	/* resize the ring buffer to 0 */
-	tracing_resize_ring_buffer(0);
+	tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
 
 	return 0;
 }
@@ -3933,9 +4059,10 @@ static const struct file_operations tracing_pipe_fops = {
 };
 
 static const struct file_operations tracing_entries_fops = {
-	.open		= tracing_open_generic,
+	.open		= tracing_entries_open,
 	.read		= tracing_entries_read,
 	.write		= tracing_entries_write,
+	.release	= tracing_entries_release,
 	.llseek		= generic_file_llseek,
 };
 
@@ -4387,6 +4514,9 @@ static void tracing_init_debugfs_percpu(long cpu)
 
 	trace_create_file("stats", 0444, d_cpu,
 			  (void *) cpu, &tracing_stats_fops);
+
+	trace_create_file("buffer_size_kb", 0444, d_cpu,
+			  (void *) cpu, &tracing_entries_fops);
 }
 
 #ifdef CONFIG_FTRACE_SELFTEST
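
Each cpu's debugfs directory now gets its own buffer_size_kb (created read-only here), while the writable top-level file is registered below with RING_BUFFER_ALL_CPUS as its i_private cookie. A userspace sketch that reads both views (assumes debugfs is mounted at /sys/kernel/debug and the usual per_cpu/cpuN layout):

	#include <stdio.h>

	int main(void)
	{
		char path[128], line[64];
		FILE *f;
		int cpu;

		/* global view: one number, or "X" when per-cpu sizes differ */
		f = fopen("/sys/kernel/debug/tracing/buffer_size_kb", "r");
		if (f) {
			if (fgets(line, sizeof(line), f))
				printf("all cpus: %s", line);
			fclose(f);
		}

		/* per-cpu view added by this change */
		for (cpu = 0; cpu < 4; cpu++) {
			snprintf(path, sizeof(path),
				 "/sys/kernel/debug/tracing/per_cpu/cpu%d/buffer_size_kb",
				 cpu);
			f = fopen(path, "r");
			if (!f)
				continue;
			if (fgets(line, sizeof(line), f))
				printf("cpu%d: %s", cpu, line);
			fclose(f);
		}
		return 0;
	}
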
@@ -4716,7 +4846,7 @@ static __init int tracer_init_debugfs(void)
 			(void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
 
 	trace_create_file("buffer_size_kb", 0644, d_tracer,
-			&global_trace, &tracing_entries_fops);
+			(void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
 
 	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
 			&global_trace, &tracing_total_entries_fops);
@@ -4955,6 +5085,10 @@ __init static int tracer_alloc_buffers(void)
 	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
 		goto out_free_buffer_mask;
 
+	/* Only allocate trace_printk buffers if a trace_printk exists */
+	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
+		trace_printk_init_buffers();
+
 	/* To save memory, keep the ring buffer size to its minimum */
 	if (ring_buffer_expanded)
 		ring_buf_size = trace_buf_size;
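
__start___trace_bprintk_fmt and __stop___trace_bprintk_fmt are linker-generated bounds of the section that trace_printk() format strings are placed in, so comparing them tells whether any trace_printk() call was built into the kernel before committing memory to the percpu buffers. The same trick works in userspace on ELF targets, where GNU ld emits __start_/__stop_ symbols for any section whose name is a valid C identifier (the section name below is made up):

	#include <stdio.h>

	__attribute__((section("myfmts"), used))
	static const char *fmt1 = "hello %d\n";

	extern const char *__start_myfmts[], *__stop_myfmts[];

	int main(void)
	{
		if (__stop_myfmts == __start_myfmts)
			puts("section empty: skip allocation");
		else
			printf("%td entries\n", __stop_myfmts - __start_myfmts);
		return 0;
	}
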
@@ -4973,7 +5107,6 @@ __init static int tracer_alloc_buffers(void)
 		WARN_ON(1);
 		goto out_free_cpumask;
 	}
-	global_trace.entries = ring_buffer_size(global_trace.buffer);
 	if (global_trace.buffer_disabled)
 		tracing_off();
 
@@ -4986,7 +5119,6 @@ __init static int tracer_alloc_buffers(void)
 		ring_buffer_free(global_trace.buffer);
 		goto out_free_cpumask;
 	}
-	max_tr.entries = 1;
 #endif
 
 	/* Allocate the first page for all buffers */
@@ -4995,6 +5127,11 @@ __init static int tracer_alloc_buffers(void)
 		max_tr.data[i] = &per_cpu(max_tr_data, i);
 	}
 
+	set_buffer_entries(&global_trace, ring_buf_size);
+#ifdef CONFIG_TRACER_MAX_TRACE
+	set_buffer_entries(&max_tr, 1);
+#endif
+
 	trace_init_cmdlines();
 
 	register_tracer(&nop_trace);