-rw-r--r--  include/linux/kernel.h      |  13
-rw-r--r--  kernel/trace/trace.c        | 184
-rw-r--r--  kernel/trace/trace.h        |   2
-rw-r--r--  kernel/trace/trace_printk.c |   4
4 files changed, 148 insertions, 55 deletions
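The patch below replaces the global, spinlock-protected static buffers that trace_printk() used for staging output with per-cpu buffers, one per execution context (normal, softirq, hard irq, NMI). Contexts nest strictly and each level writes only its own slot, so no lock is needed and trace_printk() becomes safe even from NMI context, where taking the old arch spinlock could deadlock. A minimal user-space model of the idea (an illustrative sketch only, not kernel code; the static array stands in for alloc_percpu()):

    #include <stdio.h>

    /* One slot per execution context, mirroring the four kernel buffers. */
    enum ctx { CTX_TASK, CTX_SOFTIRQ, CTX_IRQ, CTX_NMI, CTX_MAX };

    #define NR_CPUS 4
    static char bufs[NR_CPUS][CTX_MAX][64];  /* stand-in for alloc_percpu() */

    /* An interrupting context picks a different slot than the code it
     * interrupted, so two writers never share a buffer. */
    static char *get_buf(int cpu, enum ctx c)
    {
            return bufs[cpu][c];
    }

    int main(void)
    {
            snprintf(get_buf(0, CTX_TASK), 64, "task-level message");
            snprintf(get_buf(0, CTX_NMI), 64, "NMI message on the same CPU");
            printf("%s / %s\n", get_buf(0, CTX_TASK), get_buf(0, CTX_NMI));
            return 0;
    }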
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 645231c373c8..c0d34420a913 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -480,15 +480,16 @@ do { \
 
 #define trace_printk(fmt, args...)					\
 do {									\
+	static const char *trace_printk_fmt				\
+		__attribute__((section("__trace_printk_fmt"))) =	\
+		__builtin_constant_p(fmt) ? fmt : NULL;			\
+									\
 	__trace_printk_check_format(fmt, ##args);			\
-	if (__builtin_constant_p(fmt)) {				\
-		static const char *trace_printk_fmt			\
-		__attribute__((section("__trace_printk_fmt"))) =	\
-			__builtin_constant_p(fmt) ? fmt : NULL;		\
 									\
+	if (__builtin_constant_p(fmt))					\
 		__trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args);	\
-	} else								\
+	else								\
 		__trace_printk(_THIS_IP_, fmt, ##args);			\
 } while (0)
 
 extern __printf(2, 3)
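Moving the static trace_printk_fmt declaration out of the __builtin_constant_p() branch means every trace_printk() call site now emits an entry into the __trace_printk_fmt section, even when the format is not a compile-time constant (the entry is then NULL). The section is therefore non-empty whenever trace_printk() is used at all, which is what the boot-time check in tracer_alloc_buffers() further down relies on. A sketch of that detection, using the linker-provided section bounds already declared in kernel/trace/trace.h (the helper name is hypothetical):

    /* Bounds of the __trace_printk_fmt section, provided by the linker. */
    extern const char *__start___trace_bprintk_fmt[];
    extern const char *__stop___trace_bprintk_fmt[];

    /* Non-empty section => at least one trace_printk() call site was
     * compiled in, so the per-cpu buffers are worth allocating. */
    static inline int trace_printk_used(void)
    {
            return __stop___trace_bprintk_fmt != __start___trace_bprintk_fmt;
    }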
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ed7b5d1e12f4..1ab8e35d069b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1498,25 +1498,119 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)
 
 #endif /* CONFIG_STACKTRACE */
 
+/* created for use with alloc_percpu */
+struct trace_buffer_struct {
+	char buffer[TRACE_BUF_SIZE];
+};
+
+static struct trace_buffer_struct *trace_percpu_buffer;
+static struct trace_buffer_struct *trace_percpu_sirq_buffer;
+static struct trace_buffer_struct *trace_percpu_irq_buffer;
+static struct trace_buffer_struct *trace_percpu_nmi_buffer;
+
+/*
+ * The buffer used is dependent on the context. There is a per cpu
+ * buffer for normal context, softirq context, hard irq context and
+ * for NMI context. This allows for lockless recording.
+ *
+ * Note, if the buffers failed to be allocated, then this returns NULL
+ */
+static char *get_trace_buf(void)
+{
+	struct trace_buffer_struct *percpu_buffer;
+	struct trace_buffer_struct *buffer;
+
+	/*
+	 * If we have allocated per cpu buffers, then we do not
+	 * need to do any locking.
+	 */
+	if (in_nmi())
+		percpu_buffer = trace_percpu_nmi_buffer;
+	else if (in_irq())
+		percpu_buffer = trace_percpu_irq_buffer;
+	else if (in_softirq())
+		percpu_buffer = trace_percpu_sirq_buffer;
+	else
+		percpu_buffer = trace_percpu_buffer;
+
+	if (!percpu_buffer)
+		return NULL;
+
+	buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());
+
+	return buffer->buffer;
+}
+
+static int alloc_percpu_trace_buffer(void)
+{
+	struct trace_buffer_struct *buffers;
+	struct trace_buffer_struct *sirq_buffers;
+	struct trace_buffer_struct *irq_buffers;
+	struct trace_buffer_struct *nmi_buffers;
+
+	buffers = alloc_percpu(struct trace_buffer_struct);
+	if (!buffers)
+		goto err_warn;
+
+	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
+	if (!sirq_buffers)
+		goto err_sirq;
+
+	irq_buffers = alloc_percpu(struct trace_buffer_struct);
+	if (!irq_buffers)
+		goto err_irq;
+
+	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
+	if (!nmi_buffers)
+		goto err_nmi;
+
+	trace_percpu_buffer = buffers;
+	trace_percpu_sirq_buffer = sirq_buffers;
+	trace_percpu_irq_buffer = irq_buffers;
+	trace_percpu_nmi_buffer = nmi_buffers;
+
+	return 0;
+
+ err_nmi:
+	free_percpu(irq_buffers);
+ err_irq:
+	free_percpu(sirq_buffers);
+ err_sirq:
+	free_percpu(buffers);
+ err_warn:
+	WARN(1, "Could not allocate percpu trace_printk buffer");
+	return -ENOMEM;
+}
+
+void trace_printk_init_buffers(void)
+{
+	static int buffers_allocated;
+
+	if (buffers_allocated)
+		return;
+
+	if (alloc_percpu_trace_buffer())
+		return;
+
+	pr_info("ftrace: Allocated trace_printk buffers\n");
+
+	buffers_allocated = 1;
+}
+
 /**
  * trace_vbprintk - write binary msg to tracing buffer
  *
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	static arch_spinlock_t trace_buf_lock =
-		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
-	static u32 trace_buf[TRACE_BUF_SIZE];
-
 	struct ftrace_event_call *call = &event_bprint;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
 	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
 	struct bprint_entry *entry;
 	unsigned long flags;
-	int disable;
-	int cpu, len = 0, size, pc;
+	char *tbuffer;
+	int len = 0, size, pc;
 
 	if (unlikely(tracing_selftest_running || tracing_disabled))
 		return 0;
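alloc_percpu_trace_buffer() above follows the kernel's standard goto-unwind error handling: each failure label frees exactly what was allocated before it, in reverse order, so a partial failure leaks nothing. A minimal user-space illustration of the idiom (hypothetical resources, not the kernel function itself):

    #include <stdlib.h>

    static int alloc_three(void **a, void **b, void **c)
    {
            *a = malloc(32);
            if (!*a)
                    goto err;

            *b = malloc(32);
            if (!*b)
                    goto err_a;

            *c = malloc(32);
            if (!*c)
                    goto err_b;

            return 0;

    err_b:          /* third allocation failed: undo the second ... */
            free(*b);
    err_a:          /* ... then the first */
            free(*a);
    err:
            return -1;
    }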
@@ -1526,43 +1620,36 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	pc = preempt_count();
 	preempt_disable_notrace();
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
 
-	disable = atomic_inc_return(&data->disabled);
-	if (unlikely(disable != 1))
+	tbuffer = get_trace_buf();
+	if (!tbuffer) {
+		len = 0;
 		goto out;
+	}
 
-	/* Lockdep uses trace_printk for lock tracing */
-	local_irq_save(flags);
-	arch_spin_lock(&trace_buf_lock);
-	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
 
-	if (len > TRACE_BUF_SIZE || len < 0)
-		goto out_unlock;
+	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
+		goto out;
 
+	local_save_flags(flags);
 	size = sizeof(*entry) + sizeof(u32) * len;
 	buffer = tr->buffer;
 	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
 					  flags, pc);
 	if (!event)
-		goto out_unlock;
+		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
 	entry->fmt = fmt;
 
-	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
+	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
 	if (!filter_check_discard(call, entry, buffer, event)) {
 		ring_buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
 
-out_unlock:
-	arch_spin_unlock(&trace_buf_lock);
-	local_irq_restore(flags);
-
 out:
-	atomic_dec_return(&data->disabled);
 	preempt_enable_notrace();
 	unpause_graph_tracing();
 
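In the binary path above, vbin_printf() sizes its output in u32 words, not bytes, hence the TRACE_BUF_SIZE/sizeof(int) bound now that tbuffer is a byte array of TRACE_BUF_SIZE. Only the argument values are staged; the format string is recorded by pointer (entry->fmt) and expanded when the trace is read back with bstr_printf(). A sketch of that round trip built on the real lib/vsprintf.c pair (hypothetical, untested helper; requires CONFIG_BINARY_PRINTF):

    #include <stdarg.h>
    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/string.h>

    static int bprintk_roundtrip(char *out, size_t outsz, const char *fmt, ...)
    {
            u32 bin[64];
            va_list args;
            int len;

            /* encode: the size argument counts u32 words, just like
             * TRACE_BUF_SIZE/sizeof(int) in trace_vbprintk() */
            va_start(args, fmt);
            len = vbin_printf(bin, ARRAY_SIZE(bin), fmt, args);
            va_end(args);

            if (len < 0 || len > (int)ARRAY_SIZE(bin))
                    return -EINVAL;

            /* decode: at read time, the saved fmt expands the binary args */
            return bstr_printf(out, outsz, fmt, bin);
    }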
@@ -1588,58 +1675,53 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-	static char trace_buf[TRACE_BUF_SIZE];
-
 	struct ftrace_event_call *call = &event_print;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
-	struct trace_array_cpu *data;
-	int cpu, len = 0, size, pc;
+	int len = 0, size, pc;
 	struct print_entry *entry;
-	unsigned long irq_flags;
-	int disable;
+	unsigned long flags;
+	char *tbuffer;
 
 	if (tracing_disabled || tracing_selftest_running)
 		return 0;
 
+	/* Don't pollute graph traces with trace_vprintk internals */
+	pause_graph_tracing();
+
 	pc = preempt_count();
 	preempt_disable_notrace();
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
 
-	disable = atomic_inc_return(&data->disabled);
-	if (unlikely(disable != 1))
+
+	tbuffer = get_trace_buf();
+	if (!tbuffer) {
+		len = 0;
 		goto out;
+	}
 
-	pause_graph_tracing();
-	raw_local_irq_save(irq_flags);
-	arch_spin_lock(&trace_buf_lock);
-	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
+	if (len > TRACE_BUF_SIZE)
+		goto out;
 
+	local_save_flags(flags);
 	size = sizeof(*entry) + len + 1;
 	buffer = tr->buffer;
 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-					  irq_flags, pc);
+					  flags, pc);
 	if (!event)
-		goto out_unlock;
+		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
 
-	memcpy(&entry->buf, trace_buf, len);
+	memcpy(&entry->buf, tbuffer, len);
 	entry->buf[len] = '\0';
 	if (!filter_check_discard(call, entry, buffer, event)) {
 		ring_buffer_unlock_commit(buffer, event);
-		ftrace_trace_stack(buffer, irq_flags, 6, pc);
+		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
-
-out_unlock:
-	arch_spin_unlock(&trace_buf_lock);
-	raw_local_irq_restore(irq_flags);
-	unpause_graph_tracing();
 out:
-	atomic_dec_return(&data->disabled);
 	preempt_enable_notrace();
+	unpause_graph_tracing();
 
 	return len;
 }
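trace_array_vprintk() gets the same treatment for the plain-string path: vsnprintf() into the per-cpu buffer, with the spinlock and the data->disabled counter gone. vsnprintf() returns the length the output would have had, which can exceed the buffer, and the len > TRACE_BUF_SIZE test above drops such oversized messages rather than committing them. A user-space sketch of that contract (hypothetical helper; the strict < check below also rejects the exact-fit truncation edge case):

    #include <stdarg.h>
    #include <stdio.h>

    /* Returns 1 if the formatted string fit in buf, 0 if truncated. */
    static int format_fits(char *buf, size_t sz, const char *fmt, ...)
    {
            va_list ap;
            int len;

            va_start(ap, fmt);
            len = vsnprintf(buf, sz, fmt, ap);  /* reports would-be length */
            va_end(ap);

            return len >= 0 && (size_t)len < sz;
    }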
@@ -4955,6 +5037,10 @@ __init static int tracer_alloc_buffers(void)
 	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
 		goto out_free_buffer_mask;
 
+	/* Only allocate trace_printk buffers if a trace_printk exists */
+	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
+		trace_printk_init_buffers();
+
 	/* To save memory, keep the ring buffer size to its minimum */
 	if (ring_buffer_expanded)
 		ring_buf_size = trace_buf_size;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 95059f091a24..f9d85504f04b 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -826,6 +826,8 @@ extern struct list_head ftrace_events;
 extern const char *__start___trace_bprintk_fmt[];
 extern const char *__stop___trace_bprintk_fmt[];
 
+void trace_printk_init_buffers(void);
+
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
 extern struct ftrace_event_call					\
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 6fd4ffd042f9..a9077c1b4ad3 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -51,6 +51,10 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
 	const char **iter;
 	char *fmt;
 
+	/* allocate the trace_printk per cpu buffers */
+	if (start != end)
+		trace_printk_init_buffers();
+
 	mutex_lock(&btrace_mutex);
 	for (iter = start; iter < end; iter++) {
 		struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
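hold_module_trace_bprintk_format() runs when a module carrying a __trace_printk_fmt section is loaded, so the hunk above makes a trace_printk() user built as a module trigger the per-cpu buffer allocation too, not only users built into the kernel image. A minimal sketch of such a user (hypothetical demo module, untested):

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/jiffies.h>

    static int __init tp_demo_init(void)
    {
            /* Constant format: takes the binary __trace_bprintk() fast path;
             * its fmt pointer lands in this module's __trace_printk_fmt
             * section, prompting trace_printk_init_buffers() at load time. */
            trace_printk("tp_demo loaded at jiffies=%lu\n", jiffies);
            return 0;
    }

    static void __exit tp_demo_exit(void)
    {
            trace_printk("tp_demo unloading\n");
    }

    module_init(tp_demo_init);
    module_exit(tp_demo_exit);
    MODULE_LICENSE("GPL");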