Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r-- | kernel/trace/trace.c | 257
1 file changed, 203 insertions(+), 54 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5c9c6d907054..efe3202c0209 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -45,6 +45,12 @@ unsigned long __read_mostly tracing_max_latency;
 unsigned long __read_mostly tracing_thresh;
 
 /*
+ * On boot up, the ring buffer is set to the minimum size, so that
+ * we do not waste memory on systems that are not using tracing.
+ */
+static int ring_buffer_expanded;
+
+/*
  * We need to change this state when a selftest is running.
  * A selftest will lurk into the ring-buffer to count the
  * entries inserted during the selftest although some concurrent
@@ -128,6 +134,8 @@ static int __init set_ftrace(char *str)
 {
	strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
+	/* We are using ftrace early, expand it */
+	ring_buffer_expanded = 1;
	return 1;
 }
 __setup("ftrace=", set_ftrace);
@@ -1171,10 +1179,10 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 
 
 /**
- * trace_vprintk - write binary msg to tracing buffer
+ * trace_vbprintk - write binary msg to tracing buffer
  *
  */
-int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
+int trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 {
	static raw_spinlock_t trace_buf_lock =
		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
@@ -1183,7 +1191,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
	struct ring_buffer_event *event;
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
-	struct print_entry *entry;
+	struct bprint_entry *entry;
	unsigned long flags;
	int resched;
	int cpu, len = 0, size, pc;
@@ -1211,7 +1219,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
		goto out_unlock;
 
	size = sizeof(*entry) + sizeof(u32) * len;
-	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, flags, pc);
+	event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc);
	if (!event)
		goto out_unlock;
	entry = ring_buffer_event_data(event);
@@ -1232,6 +1240,60 @@ out:
 
	return len;
 }
+EXPORT_SYMBOL_GPL(trace_vbprintk);
+
+int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
+{
+	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+	static char trace_buf[TRACE_BUF_SIZE];
+
+	struct ring_buffer_event *event;
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	int cpu, len = 0, size, pc;
+	struct print_entry *entry;
+	unsigned long irq_flags;
+
+	if (tracing_disabled || tracing_selftest_running)
+		return 0;
+
+	pc = preempt_count();
+	preempt_disable_notrace();
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+
+	if (unlikely(atomic_read(&data->disabled)))
+		goto out;
+
+	pause_graph_tracing();
+	raw_local_irq_save(irq_flags);
+	__raw_spin_lock(&trace_buf_lock);
+	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+
+	len = min(len, TRACE_BUF_SIZE-1);
+	trace_buf[len] = 0;
+
+	size = sizeof(*entry) + len + 1;
+	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
+	if (!event)
+		goto out_unlock;
+	entry = ring_buffer_event_data(event);
+	entry->ip = ip;
+	entry->depth = depth;
+
+	memcpy(&entry->buf, trace_buf, len);
+	entry->buf[len] = 0;
+	ring_buffer_unlock_commit(tr->buffer, event);
+
+ out_unlock:
+	__raw_spin_unlock(&trace_buf_lock);
+	raw_local_irq_restore(irq_flags);
+	unpause_graph_tracing();
+ out:
+	preempt_enable_notrace();
+
+	return len;
+}
 EXPORT_SYMBOL_GPL(trace_vprintk);
 
 enum trace_file_type {
@@ -1620,6 +1682,22 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
	return TRACE_TYPE_HANDLED;
 }
 
+static enum print_line_t print_bprintk_msg_only(struct trace_iterator *iter)
+{
+	struct trace_seq *s = &iter->seq;
+	struct trace_entry *entry = iter->ent;
+	struct bprint_entry *field;
+	int ret;
+
+	trace_assign_type(field, entry);
+
+	ret = trace_seq_bprintf(s, field->fmt, field->buf);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
 static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
 {
	struct trace_seq *s = &iter->seq;
@@ -1629,7 +1707,7 @@ static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
 
	trace_assign_type(field, entry);
 
-	ret = trace_seq_bprintf(s, field->fmt, field->buf);
+	ret = trace_seq_printf(s, "%s", field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
 
@@ -1658,6 +1736,19 @@ static int trace_empty(struct trace_iterator *iter)
 {
	int cpu;
 
+	/* If we are looking at one CPU buffer, only check that one */
+	if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
+		cpu = iter->cpu_file;
+		if (iter->buffer_iter[cpu]) {
+			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
+				return 0;
+		} else {
+			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
+				return 0;
+		}
+		return 1;
+	}
+
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu]) {
			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
@@ -1681,6 +1772,11 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
			return ret;
	}
 
+	if (iter->ent->type == TRACE_BPRINT &&
+			trace_flags & TRACE_ITER_PRINTK &&
+			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
+		return print_bprintk_msg_only(iter);
+
	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
@@ -1784,17 +1880,11 @@ __tracing_open(struct inode *inode, struct file *file)
 
			iter->buffer_iter[cpu] =
				ring_buffer_read_start(iter->tr->buffer, cpu);
-
-			if (!iter->buffer_iter[cpu])
-				goto fail_buffer;
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_start(iter->tr->buffer, cpu);
-
-		if (!iter->buffer_iter[cpu])
-			goto fail;
	}
 
	/* TODO stop tracer */
@@ -2315,6 +2405,75 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
	return t->init(tr);
 }
 
+static int tracing_resize_ring_buffer(unsigned long size)
+{
+	int ret;
+
+	/*
+	 * If kernel or user changes the size of the ring buffer
+	 * we use the size that was given, and we can forget about
+	 * expanding it later.
+	 */
+	ring_buffer_expanded = 1;
+
+	ret = ring_buffer_resize(global_trace.buffer, size);
+	if (ret < 0)
+		return ret;
+
+	ret = ring_buffer_resize(max_tr.buffer, size);
+	if (ret < 0) {
+		int r;
+
+		r = ring_buffer_resize(global_trace.buffer,
+				       global_trace.entries);
+		if (r < 0) {
+			/*
+			 * AARGH! We are left with different
+			 * size max buffer!!!!
+			 * The max buffer is our "snapshot" buffer.
+			 * When a tracer needs a snapshot (one of the
+			 * latency tracers), it swaps the max buffer
+			 * with the saved snap shot. We succeeded to
+			 * update the size of the main buffer, but failed to
+			 * update the size of the max buffer. But when we tried
+			 * to reset the main buffer to the original size, we
+			 * failed there too. This is very unlikely to
+			 * happen, but if it does, warn and kill all
+			 * tracing.
+			 */
+			WARN_ON(1);
+			tracing_disabled = 1;
+		}
+		return ret;
+	}
+
+	global_trace.entries = size;
+
+	return ret;
+}
+
+/**
+ * tracing_update_buffers - used by tracing facility to expand ring buffers
+ *
+ * To save on memory when the tracing is never used on a system with it
+ * configured in. The ring buffers are set to a minimum size. But once
+ * a user starts to use the tracing facility, then they need to grow
+ * to their default size.
+ *
+ * This function is to be called when a tracer is about to be used.
+ */
+int tracing_update_buffers(void)
+{
+	int ret = 0;
+
+	mutex_lock(&trace_types_lock);
+	if (!ring_buffer_expanded)
+		ret = tracing_resize_ring_buffer(trace_buf_size);
+	mutex_unlock(&trace_types_lock);
+
+	return ret;
+}
+
 struct trace_option_dentry;
 
 static struct trace_option_dentry *
@@ -2331,6 +2490,14 @@ static int tracing_set_tracer(const char *buf)
	int ret = 0;
 
	mutex_lock(&trace_types_lock);
+
+	if (!ring_buffer_expanded) {
+		ret = tracing_resize_ring_buffer(trace_buf_size);
+		if (ret < 0)
+			return ret;
+		ret = 0;
+	}
+
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
@@ -2856,10 +3023,18 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
 {
	struct trace_array *tr = filp->private_data;
-	char buf[64];
+	char buf[96];
	int r;
 
-	r = sprintf(buf, "%lu\n", tr->entries >> 10);
+	mutex_lock(&trace_types_lock);
+	if (!ring_buffer_expanded)
+		r = sprintf(buf, "%lu (expanded: %lu)\n",
+			    tr->entries >> 10,
+			    trace_buf_size >> 10);
+	else
+		r = sprintf(buf, "%lu\n", tr->entries >> 10);
+	mutex_unlock(&trace_types_lock);
+
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
@@ -2903,28 +3078,11 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
	val <<= 10;
 
	if (val != global_trace.entries) {
-		ret = ring_buffer_resize(global_trace.buffer, val);
+		ret = tracing_resize_ring_buffer(val);
		if (ret < 0) {
			cnt = ret;
			goto out;
		}
-
-		ret = ring_buffer_resize(max_tr.buffer, val);
-		if (ret < 0) {
-			int r;
-			cnt = ret;
-			r = ring_buffer_resize(global_trace.buffer,
-					       global_trace.entries);
-			if (r < 0) {
-				/* AARGH! We are left with different
-				   size max buffer!!!! */
-				WARN_ON(1);
-				tracing_disabled = 1;
-			}
-			goto out;
-		}
-
-		global_trace.entries = val;
	}
 
	filp->f_pos += cnt;
@@ -3385,6 +3543,11 @@ static void tracing_init_debugfs_percpu(long cpu)
			(void *) cpu, &tracing_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace' entry\n");
+
+	entry = debugfs_create_file("trace_pipe_raw", 0444, d_cpu,
+				    (void *) cpu, &tracing_buffers_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs 'trace_pipe_raw' entry\n");
 }
 
 #ifdef CONFIG_FTRACE_SELFTEST
@@ -3668,7 +3831,6 @@ static __init void create_trace_options_dir(void)
 static __init int tracer_init_debugfs(void)
 {
	struct dentry *d_tracer;
-	struct dentry *buffers;
	struct dentry *entry;
	int cpu;
 
@@ -3741,26 +3903,6 @@ static __init int tracer_init_debugfs(void)
		pr_warning("Could not create debugfs "
			   "'trace_marker' entry\n");
 
-	buffers = debugfs_create_dir("binary_buffers", d_tracer);
-
-	if (!buffers)
-		pr_warning("Could not create buffers directory\n");
-	else {
-		int cpu;
-		char buf[64];
-
-		for_each_tracing_cpu(cpu) {
-			sprintf(buf, "%d", cpu);
-
-			entry = debugfs_create_file(buf, 0444, buffers,
-						    (void *)(long)cpu,
-						    &tracing_buffers_fops);
-			if (!entry)
-				pr_warning("Could not create debugfs buffers "
-					   "'%s' entry\n", buf);
-		}
-	}
-
 #ifdef CONFIG_DYNAMIC_FTRACE
	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
				   &ftrace_update_tot_cnt,
@@ -3916,6 +4058,7 @@ void ftrace_dump(void)
 __init static int tracer_alloc_buffers(void)
 {
	struct trace_array_cpu *data;
+	int ring_buf_size;
	int i;
	int ret = -ENOMEM;
 
@@ -3928,12 +4071,18 @@ __init static int tracer_alloc_buffers(void)
	if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
		goto out_free_tracing_cpumask;
 
+	/* To save memory, keep the ring buffer size to its minimum */
+	if (ring_buffer_expanded)
+		ring_buf_size = trace_buf_size;
+	else
+		ring_buf_size = 1;
+
	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(tracing_cpumask, cpu_all_mask);
	cpumask_clear(tracing_reader_cpumask);
 
	/* TODO: make the number of buffers hot pluggable with CPUS */
-	global_trace.buffer = ring_buffer_alloc(trace_buf_size,
+	global_trace.buffer = ring_buffer_alloc(ring_buf_size,
						TRACE_BUFFER_FLAGS);
	if (!global_trace.buffer) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
@@ -3944,7 +4093,7 @@ __init static int tracer_alloc_buffers(void)
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	max_tr.buffer = ring_buffer_alloc(trace_buf_size,
+	max_tr.buffer = ring_buffer_alloc(ring_buf_size,
					  TRACE_BUFFER_FLAGS);
	if (!max_tr.buffer) {
		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
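
Not part of the commit, but a minimal user-space sketch of the behaviour the buffer_size_kb hunks above produce: while the ring buffer is still at its boot-time minimum, a read reports both the current size and the size it would expand to, and only the current size once it has been expanded. The debugfs mount point and the sample value in the comment are illustrative assumptions, not taken from this patch.

/* Illustration only: print the tracing buffer size as reported after this
 * change. Assumes debugfs is mounted at the usual /sys/kernel/debug path. */
#include <stdio.h>

int main(void)
{
	char line[96];
	FILE *f = fopen("/sys/kernel/debug/tracing/buffer_size_kb", "r");

	if (!f) {
		perror("buffer_size_kb");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "7 (expanded: 1408)" before a tracer is enabled */
	fclose(f);
	return 0;
}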