Diffstat (limited to 'kernel/trace/trace.c')

 -rw-r--r--  kernel/trace/trace.c | 503
 1 file changed, 312 insertions(+), 191 deletions(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2a22255c1010..68032c6177db 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -87,18 +87,6 @@ static int tracing_disabled = 1;
 
 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
-static inline void ftrace_disable_cpu(void)
-{
-	preempt_disable();
-	__this_cpu_inc(ftrace_cpu_disabled);
-}
-
-static inline void ftrace_enable_cpu(void)
-{
-	__this_cpu_dec(ftrace_cpu_disabled);
-	preempt_enable();
-}
-
 cpumask_var_t __read_mostly tracing_buffer_mask;
 
 /*
@@ -629,7 +617,6 @@ ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 {
 	int len;
-	void *ret;
 
 	if (s->len <= s->readpos)
 		return -EBUSY;
@@ -637,9 +624,7 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 	len = s->len - s->readpos;
 	if (cnt > len)
 		cnt = len;
-	ret = memcpy(buf, s->buffer + s->readpos, cnt);
-	if (!ret)
-		return -EFAULT;
+	memcpy(buf, s->buffer + s->readpos, cnt);
 
 	s->readpos += cnt;
 	return cnt;
@@ -751,8 +736,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	arch_spin_lock(&ftrace_max_lock);
 
-	ftrace_disable_cpu();
-
 	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
 
 	if (ret == -EBUSY) {
@@ -766,8 +749,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 			"Failed to swap buffers due to commit in progress\n");
 	}
 
-	ftrace_enable_cpu();
-
 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
@@ -782,8 +763,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
  * Register a new plugin tracer.
  */
 int register_tracer(struct tracer *type)
-__releases(kernel_lock)
-__acquires(kernel_lock)
 {
 	struct tracer *t;
 	int ret = 0;
@@ -841,7 +820,8 @@ __acquires(kernel_lock)
 
 		/* If we expanded the buffers, make sure the max is expanded too */
 		if (ring_buffer_expanded && type->use_max_tr)
-			ring_buffer_resize(max_tr.buffer, trace_buf_size);
+			ring_buffer_resize(max_tr.buffer, trace_buf_size,
+						RING_BUFFER_ALL_CPUS);
 
 		/* the test is responsible for initializing and enabling */
 		pr_info("Testing tracer %s: ", type->name);
@@ -857,7 +837,8 @@ __acquires(kernel_lock)
 
 		/* Shrink the max buffer again */
 		if (ring_buffer_expanded && type->use_max_tr)
-			ring_buffer_resize(max_tr.buffer, 1);
+			ring_buffer_resize(max_tr.buffer, 1,
+						RING_BUFFER_ALL_CPUS);
 
 		printk(KERN_CONT "PASSED\n");
 	}
@@ -917,13 +898,6 @@ out:
 	mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct ring_buffer *buffer, int cpu)
-{
-	ftrace_disable_cpu();
-	ring_buffer_reset_cpu(buffer, cpu);
-	ftrace_enable_cpu();
-}
-
 void tracing_reset(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer *buffer = tr->buffer;
@@ -932,7 +906,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
 
 	/* Make sure all commits have finished */
 	synchronize_sched();
-	__tracing_reset(buffer, cpu);
+	ring_buffer_reset_cpu(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -950,7 +924,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		__tracing_reset(buffer, cpu);
+		ring_buffer_reset_cpu(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -1498,25 +1472,119 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)
 
 #endif /* CONFIG_STACKTRACE */
 
+/* created for use with alloc_percpu */
+struct trace_buffer_struct {
+	char buffer[TRACE_BUF_SIZE];
+};
+
+static struct trace_buffer_struct *trace_percpu_buffer;
+static struct trace_buffer_struct *trace_percpu_sirq_buffer;
+static struct trace_buffer_struct *trace_percpu_irq_buffer;
+static struct trace_buffer_struct *trace_percpu_nmi_buffer;
+
+/*
+ * The buffer used is dependent on the context. There is a per cpu
+ * buffer for normal context, softirq contex, hard irq context and
+ * for NMI context. Thise allows for lockless recording.
+ *
+ * Note, if the buffers failed to be allocated, then this returns NULL
+ */
+static char *get_trace_buf(void)
+{
+	struct trace_buffer_struct *percpu_buffer;
+	struct trace_buffer_struct *buffer;
+
+	/*
+	 * If we have allocated per cpu buffers, then we do not
+	 * need to do any locking.
+	 */
+	if (in_nmi())
+		percpu_buffer = trace_percpu_nmi_buffer;
+	else if (in_irq())
+		percpu_buffer = trace_percpu_irq_buffer;
+	else if (in_softirq())
+		percpu_buffer = trace_percpu_sirq_buffer;
+	else
+		percpu_buffer = trace_percpu_buffer;
+
+	if (!percpu_buffer)
+		return NULL;
+
+	buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());
+
+	return buffer->buffer;
+}
+
+static int alloc_percpu_trace_buffer(void)
+{
+	struct trace_buffer_struct *buffers;
+	struct trace_buffer_struct *sirq_buffers;
+	struct trace_buffer_struct *irq_buffers;
+	struct trace_buffer_struct *nmi_buffers;
+
+	buffers = alloc_percpu(struct trace_buffer_struct);
+	if (!buffers)
+		goto err_warn;
+
+	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
+	if (!sirq_buffers)
+		goto err_sirq;
+
+	irq_buffers = alloc_percpu(struct trace_buffer_struct);
+	if (!irq_buffers)
+		goto err_irq;
+
+	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
+	if (!nmi_buffers)
+		goto err_nmi;
+
+	trace_percpu_buffer = buffers;
+	trace_percpu_sirq_buffer = sirq_buffers;
+	trace_percpu_irq_buffer = irq_buffers;
+	trace_percpu_nmi_buffer = nmi_buffers;
+
+	return 0;
+
+err_nmi:
+	free_percpu(irq_buffers);
+err_irq:
+	free_percpu(sirq_buffers);
+err_sirq:
+	free_percpu(buffers);
+err_warn:
+	WARN(1, "Could not allocate percpu trace_printk buffer");
+	return -ENOMEM;
+}
+
+void trace_printk_init_buffers(void)
+{
+	static int buffers_allocated;
+
+	if (buffers_allocated)
+		return;
+
+	if (alloc_percpu_trace_buffer())
+		return;
+
+	pr_info("ftrace: Allocated trace_printk buffers\n");
+
+	buffers_allocated = 1;
+}
+
 /**
  * trace_vbprintk - write binary msg to tracing buffer
  *
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	static arch_spinlock_t trace_buf_lock =
-		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
-	static u32 trace_buf[TRACE_BUF_SIZE];
-
 	struct ftrace_event_call *call = &event_bprint;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
 	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
 	struct bprint_entry *entry;
 	unsigned long flags;
-	int disable;
-	int cpu, len = 0, size, pc;
+	char *tbuffer;
+	int len = 0, size, pc;
 
 	if (unlikely(tracing_selftest_running || tracing_disabled))
 		return 0;
@@ -1526,43 +1594,36 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	pc = preempt_count();
 	preempt_disable_notrace();
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
 
-	disable = atomic_inc_return(&data->disabled);
-	if (unlikely(disable != 1))
+	tbuffer = get_trace_buf();
+	if (!tbuffer) {
+		len = 0;
 		goto out;
+	}
 
-	/* Lockdep uses trace_printk for lock tracing */
-	local_irq_save(flags);
-	arch_spin_lock(&trace_buf_lock);
-	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
 
-	if (len > TRACE_BUF_SIZE || len < 0)
-		goto out_unlock;
+	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
+		goto out;
 
+	local_save_flags(flags);
 	size = sizeof(*entry) + sizeof(u32) * len;
 	buffer = tr->buffer;
 	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
 	if (!event)
-		goto out_unlock;
+		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
 	entry->fmt = fmt;
 
-	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
+	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
 	if (!filter_check_discard(call, entry, buffer, event)) {
 		ring_buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
 
-out_unlock:
-	arch_spin_unlock(&trace_buf_lock);
-	local_irq_restore(flags);
-
 out:
-	atomic_dec_return(&data->disabled);
 	preempt_enable_notrace();
 	unpause_graph_tracing();
 
@@ -1588,58 +1649,53 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
 {
-	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-	static char trace_buf[TRACE_BUF_SIZE];
-
 	struct ftrace_event_call *call = &event_print;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
-	struct trace_array_cpu *data;
-	int cpu, len = 0, size, pc;
+	int len = 0, size, pc;
 	struct print_entry *entry;
-	unsigned long irq_flags;
-	int disable;
+	unsigned long flags;
+	char *tbuffer;
 
 	if (tracing_disabled || tracing_selftest_running)
 		return 0;
 
+	/* Don't pollute graph traces with trace_vprintk internals */
+	pause_graph_tracing();
+
 	pc = preempt_count();
 	preempt_disable_notrace();
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
 
-	disable = atomic_inc_return(&data->disabled);
-	if (unlikely(disable != 1))
+
+	tbuffer = get_trace_buf();
+	if (!tbuffer) {
+		len = 0;
 		goto out;
+	}
 
-	pause_graph_tracing();
-	raw_local_irq_save(irq_flags);
-	arch_spin_lock(&trace_buf_lock);
-	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
+	if (len > TRACE_BUF_SIZE)
+		goto out;
 
+	local_save_flags(flags);
 	size = sizeof(*entry) + len + 1;
 	buffer = tr->buffer;
 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-					  irq_flags, pc);
+					  flags, pc);
 	if (!event)
-		goto out_unlock;
+		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
 
-	memcpy(&entry->buf, trace_buf, len);
+	memcpy(&entry->buf, tbuffer, len);
 	entry->buf[len] = '\0';
 	if (!filter_check_discard(call, entry, buffer, event)) {
 		ring_buffer_unlock_commit(buffer, event);
-		ftrace_trace_stack(buffer, irq_flags, 6, pc);
+		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
-
-out_unlock:
-	arch_spin_unlock(&trace_buf_lock);
-	raw_local_irq_restore(irq_flags);
-	unpause_graph_tracing();
 out:
-	atomic_dec_return(&data->disabled);
 	preempt_enable_notrace();
+	unpause_graph_tracing();
 
 	return len;
 }
@@ -1652,14 +1708,9 @@ EXPORT_SYMBOL_GPL(trace_vprintk);
 
 static void trace_iterator_increment(struct trace_iterator *iter)
 {
-	/* Don't allow ftrace to trace into the ring buffers */
-	ftrace_disable_cpu();
-
 	iter->idx++;
 	if (iter->buffer_iter[iter->cpu])
		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
-
-	ftrace_enable_cpu();
 }
 
 static struct trace_entry *
@@ -1669,17 +1720,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
 	struct ring_buffer_event *event;
 	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
 
-	/* Don't allow ftrace to trace into the ring buffers */
-	ftrace_disable_cpu();
-
 	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
 	else
		event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
					 lost_events);
 
-	ftrace_enable_cpu();
-
 	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
@@ -1769,11 +1815,8 @@ void *trace_find_next_entry_inc(struct trace_iterator *iter)
 
 static void trace_consume(struct trace_iterator *iter)
 {
-	/* Don't allow ftrace to trace into the ring buffers */
-	ftrace_disable_cpu();
 	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
-	ftrace_enable_cpu();
 }
 
 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
@@ -1862,16 +1905,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
		iter->cpu = 0;
		iter->idx = -1;
 
-		ftrace_disable_cpu();
-
		if (cpu_file == TRACE_PIPE_ALL_CPU) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);
 
-		ftrace_enable_cpu();
-
		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;
@@ -2332,15 +2371,13 @@ static struct trace_iterator *
 __tracing_open(struct inode *inode, struct file *file)
 {
 	long cpu_file = (long) inode->i_private;
-	void *fail_ret = ERR_PTR(-ENOMEM);
 	struct trace_iterator *iter;
-	struct seq_file *m;
-	int cpu, ret;
+	int cpu;
 
 	if (tracing_disabled)
		return ERR_PTR(-ENODEV);
 
-	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
 	if (!iter)
		return ERR_PTR(-ENOMEM);
 
@@ -2397,32 +2434,15 @@ __tracing_open(struct inode *inode, struct file *file)
		tracing_iter_reset(iter, cpu);
 	}
 
-	ret = seq_open(file, &tracer_seq_ops);
-	if (ret < 0) {
-		fail_ret = ERR_PTR(ret);
-		goto fail_buffer;
-	}
-
-	m = file->private_data;
-	m->private = iter;
-
 	mutex_unlock(&trace_types_lock);
 
 	return iter;
 
-fail_buffer:
-	for_each_tracing_cpu(cpu) {
-		if (iter->buffer_iter[cpu])
-			ring_buffer_read_finish(iter->buffer_iter[cpu]);
-	}
-	free_cpumask_var(iter->started);
-	tracing_start();
 fail:
 	mutex_unlock(&trace_types_lock);
 	kfree(iter->trace);
-	kfree(iter);
-
-	return fail_ret;
+	seq_release_private(inode, file);
+	return ERR_PTR(-ENOMEM);
 }
 
 int tracing_open_generic(struct inode *inode, struct file *filp)
@@ -2458,11 +2478,10 @@ static int tracing_release(struct inode *inode, struct file *file)
 	tracing_start();
 	mutex_unlock(&trace_types_lock);
 
-	seq_release(inode, file);
 	mutex_destroy(&iter->mutex);
 	free_cpumask_var(iter->started);
 	kfree(iter->trace);
-	kfree(iter);
+	seq_release_private(inode, file);
 	return 0;
 }
 
@@ -2648,10 +2667,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&global_trace.data[cpu]->disabled);
+			ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&global_trace.data[cpu]->disabled);
+			ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
		}
 	}
 	arch_spin_unlock(&ftrace_max_lock);
@@ -2974,7 +2995,14 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
 	return t->init(tr);
 }
 
-static int __tracing_resize_ring_buffer(unsigned long size)
+static void set_buffer_entries(struct trace_array *tr, unsigned long val)
+{
+	int cpu;
+	for_each_tracing_cpu(cpu)
+		tr->data[cpu]->entries = val;
+}
+
+static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 {
 	int ret;
 
@@ -2985,19 +3013,32 @@ static int __tracing_resize_ring_buffer(unsigned long size)
 	 */
 	ring_buffer_expanded = 1;
 
-	ret = ring_buffer_resize(global_trace.buffer, size);
+	ret = ring_buffer_resize(global_trace.buffer, size, cpu);
 	if (ret < 0)
		return ret;
 
 	if (!current_trace->use_max_tr)
		goto out;
 
-	ret = ring_buffer_resize(max_tr.buffer, size);
+	ret = ring_buffer_resize(max_tr.buffer, size, cpu);
 	if (ret < 0) {
-		int r;
+		int r = 0;
+
+		if (cpu == RING_BUFFER_ALL_CPUS) {
+			int i;
+			for_each_tracing_cpu(i) {
+				r = ring_buffer_resize(global_trace.buffer,
						global_trace.data[i]->entries,
						i);
+				if (r < 0)
+					break;
+			}
+		} else {
+			r = ring_buffer_resize(global_trace.buffer,
					global_trace.data[cpu]->entries,
					cpu);
+		}
 
-		r = ring_buffer_resize(global_trace.buffer,
-					global_trace.entries);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
@@ -3019,43 +3060,39 @@ static int __tracing_resize_ring_buffer(unsigned long size)
		return ret;
 	}
 
-	max_tr.entries = size;
+	if (cpu == RING_BUFFER_ALL_CPUS)
+		set_buffer_entries(&max_tr, size);
+	else
+		max_tr.data[cpu]->entries = size;
+
 out:
-	global_trace.entries = size;
+	if (cpu == RING_BUFFER_ALL_CPUS)
+		set_buffer_entries(&global_trace, size);
+	else
+		global_trace.data[cpu]->entries = size;
 
 	return ret;
 }
 
-static ssize_t tracing_resize_ring_buffer(unsigned long size)
+static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
 {
-	int cpu, ret = size;
+	int ret = size;
 
 	mutex_lock(&trace_types_lock);
 
-	tracing_stop();
-
-	/* disable all cpu buffers */
-	for_each_tracing_cpu(cpu) {
-		if (global_trace.data[cpu])
-			atomic_inc(&global_trace.data[cpu]->disabled);
-		if (max_tr.data[cpu])
-			atomic_inc(&max_tr.data[cpu]->disabled);
+	if (cpu_id != RING_BUFFER_ALL_CPUS) {
+		/* make sure, this cpu is enabled in the mask */
+		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
+			ret = -EINVAL;
+			goto out;
+		}
 	}
 
-	if (size != global_trace.entries)
-		ret = __tracing_resize_ring_buffer(size);
-
+	ret = __tracing_resize_ring_buffer(size, cpu_id);
 	if (ret < 0)
		ret = -ENOMEM;
 
-	for_each_tracing_cpu(cpu) {
-		if (global_trace.data[cpu])
-			atomic_dec(&global_trace.data[cpu]->disabled);
-		if (max_tr.data[cpu])
-			atomic_dec(&max_tr.data[cpu]->disabled);
-	}
-
-	tracing_start();
+out:
 	mutex_unlock(&trace_types_lock);
 
 	return ret;
@@ -3078,7 +3115,8 @@ int tracing_update_buffers(void)
 
 	mutex_lock(&trace_types_lock);
 	if (!ring_buffer_expanded)
-		ret = __tracing_resize_ring_buffer(trace_buf_size);
+		ret = __tracing_resize_ring_buffer(trace_buf_size,
+						RING_BUFFER_ALL_CPUS);
 	mutex_unlock(&trace_types_lock);
 
 	return ret;
@@ -3102,7 +3140,8 @@ static int tracing_set_tracer(const char *buf)
 	mutex_lock(&trace_types_lock);
 
 	if (!ring_buffer_expanded) {
-		ret = __tracing_resize_ring_buffer(trace_buf_size);
+		ret = __tracing_resize_ring_buffer(trace_buf_size,
+						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
@@ -3128,8 +3167,8 @@ static int tracing_set_tracer(const char *buf)
		 * The max_tr ring buffer has some state (e.g. ring->clock) and
		 * we want preserve it.
		 */
-		ring_buffer_resize(max_tr.buffer, 1);
-		max_tr.entries = 1;
+		ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
+		set_buffer_entries(&max_tr, 1);
 	}
 	destroy_trace_option_files(topts);
 
@@ -3137,10 +3176,17 @@ static int tracing_set_tracer(const char *buf)
 
 	topts = create_trace_option_files(current_trace);
 	if (current_trace->use_max_tr) {
-		ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
-		if (ret < 0)
-			goto out;
-		max_tr.entries = global_trace.entries;
+		int cpu;
+		/* we need to make per cpu buffer sizes equivalent */
+		for_each_tracing_cpu(cpu) {
+			ret = ring_buffer_resize(max_tr.buffer,
						global_trace.data[cpu]->entries,
						cpu);
+			if (ret < 0)
+				goto out;
+			max_tr.data[cpu]->entries =
+				global_trace.data[cpu]->entries;
+		}
 	}
 
 	if (t->init) {
@@ -3642,30 +3688,82 @@ out_err:
 	goto out;
 }
 
+struct ftrace_entries_info {
+	struct trace_array *tr;
+	int cpu;
+};
+
+static int tracing_entries_open(struct inode *inode, struct file *filp)
+{
+	struct ftrace_entries_info *info;
+
+	if (tracing_disabled)
+		return -ENODEV;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->tr = &global_trace;
+	info->cpu = (unsigned long)inode->i_private;
+
+	filp->private_data = info;
+
+	return 0;
+}
+
 static ssize_t
 tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
 {
-	struct trace_array *tr = filp->private_data;
-	char buf[96];
-	int r;
+	struct ftrace_entries_info *info = filp->private_data;
+	struct trace_array *tr = info->tr;
+	char buf[64];
+	int r = 0;
+	ssize_t ret;
 
 	mutex_lock(&trace_types_lock);
-	if (!ring_buffer_expanded)
-		r = sprintf(buf, "%lu (expanded: %lu)\n",
-			    tr->entries >> 10,
-			    trace_buf_size >> 10);
-	else
-		r = sprintf(buf, "%lu\n", tr->entries >> 10);
+
+	if (info->cpu == RING_BUFFER_ALL_CPUS) {
+		int cpu, buf_size_same;
+		unsigned long size;
+
+		size = 0;
+		buf_size_same = 1;
+		/* check if all cpu sizes are same */
+		for_each_tracing_cpu(cpu) {
+			/* fill in the size from first enabled cpu */
+			if (size == 0)
+				size = tr->data[cpu]->entries;
+			if (size != tr->data[cpu]->entries) {
+				buf_size_same = 0;
+				break;
+			}
+		}
+
+		if (buf_size_same) {
+			if (!ring_buffer_expanded)
+				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
+			else
+				r = sprintf(buf, "%lu\n", size >> 10);
+		} else
+			r = sprintf(buf, "X\n");
+	} else
+		r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10);
+
 	mutex_unlock(&trace_types_lock);
 
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+	return ret;
 }
 
 static ssize_t
 tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
 {
+	struct ftrace_entries_info *info = filp->private_data;
 	unsigned long val;
 	int ret;
 
@@ -3680,7 +3778,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	/* value is in KB */
 	val <<= 10;
 
-	ret = tracing_resize_ring_buffer(val);
+	ret = tracing_resize_ring_buffer(val, info->cpu);
 	if (ret < 0)
		return ret;
 
@@ -3689,6 +3787,16 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int
+tracing_entries_release(struct inode *inode, struct file *filp)
+{
+	struct ftrace_entries_info *info = filp->private_data;
+
+	kfree(info);
+
+	return 0;
+}
+
 static ssize_t
 tracing_total_entries_read(struct file *filp, char __user *ubuf,
			  size_t cnt, loff_t *ppos)
@@ -3700,7 +3808,7 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
 
 	mutex_lock(&trace_types_lock);
 	for_each_tracing_cpu(cpu) {
-		size += tr->entries >> 10;
+		size += tr->data[cpu]->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
 	}
@@ -3734,7 +3842,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
 	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracing_off();
 	/* resize the ring buffer to 0 */
-	tracing_resize_ring_buffer(0);
+	tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
 
 	return 0;
 }
@@ -3749,14 +3857,14 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	struct print_entry *entry;
 	unsigned long irq_flags;
 	struct page *pages[2];
+	void *map_page[2];
 	int nr_pages = 1;
 	ssize_t written;
-	void *page1;
-	void *page2;
 	int offset;
 	int size;
 	int len;
 	int ret;
+	int i;
 
 	if (tracing_disabled)
		return -EINVAL;
@@ -3795,9 +3903,8 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
		goto out;
 	}
 
-	page1 = kmap_atomic(pages[0]);
-	if (nr_pages == 2)
-		page2 = kmap_atomic(pages[1]);
+	for (i = 0; i < nr_pages; i++)
+		map_page[i] = kmap_atomic(pages[i]);
 
 	local_save_flags(irq_flags);
 	size = sizeof(*entry) + cnt + 2; /* possible \n added */
@@ -3815,10 +3922,10 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 
 	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
-		memcpy(&entry->buf, page1 + offset, len);
-		memcpy(&entry->buf[len], page2, cnt - len);
+		memcpy(&entry->buf, map_page[0] + offset, len);
+		memcpy(&entry->buf[len], map_page[1], cnt - len);
 	} else
-		memcpy(&entry->buf, page1 + offset, cnt);
+		memcpy(&entry->buf, map_page[0] + offset, cnt);
 
 	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
@@ -3833,11 +3940,10 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	*fpos += written;
 
 out_unlock:
-	if (nr_pages == 2)
-		kunmap_atomic(page2);
-	kunmap_atomic(page1);
-	while (nr_pages > 0)
-		put_page(pages[--nr_pages]);
+	for (i = 0; i < nr_pages; i++){
+		kunmap_atomic(map_page[i]);
+		put_page(pages[i]);
+	}
 out:
 	return written;
 }
@@ -3933,9 +4039,10 @@ static const struct file_operations tracing_pipe_fops = {
 };
 
 static const struct file_operations tracing_entries_fops = {
-	.open = tracing_open_generic,
+	.open = tracing_entries_open,
 	.read = tracing_entries_read,
 	.write = tracing_entries_write,
+	.release = tracing_entries_release,
 	.llseek = generic_file_llseek,
 };
 
@@ -4367,6 +4474,9 @@ static void tracing_init_debugfs_percpu(long cpu)
 	struct dentry *d_cpu;
 	char cpu_dir[30]; /* 30 characters should be more than enough */
 
+	if (!d_percpu)
+		return;
+
 	snprintf(cpu_dir, 30, "cpu%ld", cpu);
 	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
 	if (!d_cpu) {
@@ -4387,6 +4497,9 @@ static void tracing_init_debugfs_percpu(long cpu)
 
 	trace_create_file("stats", 0444, d_cpu,
			(void *) cpu, &tracing_stats_fops);
+
+	trace_create_file("buffer_size_kb", 0444, d_cpu,
+			(void *) cpu, &tracing_entries_fops);
 }
 
 #ifdef CONFIG_FTRACE_SELFTEST
@@ -4718,7 +4831,7 @@ static __init int tracer_init_debugfs(void)
			(void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
 
 	trace_create_file("buffer_size_kb", 0644, d_tracer,
-			&global_trace, &tracing_entries_fops);
+			(void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
 
 	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			&global_trace, &tracing_total_entries_fops);
@@ -4957,6 +5070,10 @@ __init static int tracer_alloc_buffers(void)
 	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;
 
+	/* Only allocate trace_printk buffers if a trace_printk exists */
+	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
+		trace_printk_init_buffers();
+
 	/* To save memory, keep the ring buffer size to its minimum */
 	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
@@ -4975,7 +5092,6 @@ __init static int tracer_alloc_buffers(void)
		WARN_ON(1);
		goto out_free_cpumask;
 	}
-	global_trace.entries = ring_buffer_size(global_trace.buffer);
 	if (global_trace.buffer_disabled)
		tracing_off();
 
@@ -4988,7 +5104,6 @@ __init static int tracer_alloc_buffers(void)
		ring_buffer_free(global_trace.buffer);
		goto out_free_cpumask;
 	}
-	max_tr.entries = 1;
 #endif
 
 	/* Allocate the first page for all buffers */
@@ -4997,6 +5112,12 @@ __init static int tracer_alloc_buffers(void)
		max_tr.data[i] = &per_cpu(max_tr_data, i);
 	}
 
+	set_buffer_entries(&global_trace,
+			   ring_buffer_size(global_trace.buffer, 0));
+#ifdef CONFIG_TRACER_MAX_TRACE
+	set_buffer_entries(&max_tr, 1);
+#endif
+
 	trace_init_cmdlines();
 
 	register_tracer(&nop_trace);
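
For orientation, a minimal userspace sketch (not part of the patch) of how the per-CPU buffer_size_kb file created above could be exercised. The /sys/kernel/debug/tracing/per_cpu/cpuN path is an assumption based on the usual debugfs layout, and since the per-CPU file is created 0444 here, writing it needs root:

/*
 * Hypothetical example: resize one CPU's ring buffer through the
 * per-CPU buffer_size_kb file and read the value back.
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	int cpu = argc > 1 ? atoi(argv[1]) : 0;
	char path[128];
	char line[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/tracing/per_cpu/cpu%d/buffer_size_kb", cpu);

	/* Value is interpreted in KB, as in tracing_entries_write() above. */
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "4096\n");
	fclose(f);

	/*
	 * Read back this CPU's size; the top-level buffer_size_kb reports "X"
	 * once the per-CPU sizes differ.
	 */
	f = fopen(path, "r");
	if (f && fgets(line, sizeof(line), f))
		printf("cpu%d buffer_size_kb: %s", cpu, line);
	if (f)
		fclose(f);
	return 0;
}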
