diff options
author | Frederic Weisbecker <fweisbec@gmail.com> | 2009-02-24 21:22:28 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-02-25 07:40:58 -0500 |
commit | b04cc6b1f6398b0e0b60d37e27ce51b4899672ec (patch) | |
tree | 61d5bd59b364913f5a124d25925b908c5deaf871 | |
parent | 2b1b858f690d6369a59ad241335eeedec6eb0c8c (diff) |
tracing/core: introduce per cpu tracing files
Impact: split up tracing output per cpu
Currently, on the tracing debugfs directory, three files are
available to the user to let him extracting the trace output:
- trace is an iterator through the ring-buffer. It's a reader
but not a consumer. It doesn't block when no more traces are
available.
- trace is pretty similar to the former, except that it adds more
information such as preempt count, irq flag, ...
- trace_pipe is a reader and a consumer, it will also block
waiting for traces if necessary (heh, yes it's a pipe).
The traces coming from different cpus are currently mixed up
inside these files. Sometimes it messes up the information,
sometimes it's useful, depending on what the tracer
captures.
The tracing_cpumask file is useful to filter the output and
select only the traces captured on a custom defined set of cpus.
But still it is not powerful enough to extract at the same time
one trace buffer per cpu.
So this patch creates a new directory: /debug/tracing/per_cpu/.
Inside this directory, you will now find one trace_pipe file and
one trace file per cpu.
Which means if you have two cpus, you will have:
trace0
trace1
trace_pipe0
trace_pipe1
And of course, reading these files will have the same effect
as with the usual tracing files, except that you will only see
the traces from the given cpu.
The original all-in-one cpu trace files are still available in
their original place.
Until now, only one consumer was allowed on trace_pipe to avoid
racy consuming on the ring-buffer. Now the approach changed a
bit, you can have only one consumer per cpu.
Which means you are allowed to read concurrently trace_pipe0 and
trace_pipe1, but you can't have two readers on trace_pipe0 or
trace_pipe1.
Following the same logic, if there is one reader on the common
trace_pipe, you cannot at the same time have another reader on
trace_pipe0 or trace_pipe1, because trace_pipe is in essence a
consumer of all the cpu buffers.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | kernel/trace/trace.c | 168 | ||||
-rw-r--r-- | kernel/trace/trace.h | 3 |
2 files changed, 147 insertions, 24 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 11ba100f9a9..aa58b7bc847 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -98,6 +98,9 @@ static inline void ftrace_enable_cpu(void) | |||
98 | 98 | ||
99 | static cpumask_var_t __read_mostly tracing_buffer_mask; | 99 | static cpumask_var_t __read_mostly tracing_buffer_mask; |
100 | 100 | ||
101 | /* Define which cpu buffers are currently read in trace_pipe */ | ||
102 | static cpumask_var_t tracing_reader_cpumask; | ||
103 | |||
101 | #define for_each_tracing_cpu(cpu) \ | 104 | #define for_each_tracing_cpu(cpu) \ |
102 | for_each_cpu(cpu, tracing_buffer_mask) | 105 | for_each_cpu(cpu, tracing_buffer_mask) |
103 | 106 | ||
@@ -1195,10 +1198,25 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | |||
1195 | { | 1198 | { |
1196 | struct ring_buffer *buffer = iter->tr->buffer; | 1199 | struct ring_buffer *buffer = iter->tr->buffer; |
1197 | struct trace_entry *ent, *next = NULL; | 1200 | struct trace_entry *ent, *next = NULL; |
1201 | int cpu_file = iter->cpu_file; | ||
1198 | u64 next_ts = 0, ts; | 1202 | u64 next_ts = 0, ts; |
1199 | int next_cpu = -1; | 1203 | int next_cpu = -1; |
1200 | int cpu; | 1204 | int cpu; |
1201 | 1205 | ||
1206 | /* | ||
1207 | * If we are in a per_cpu trace file, don't bother by iterating over | ||
1208 | * all cpu and peek directly. | ||
1209 | */ | ||
1210 | if (cpu_file > TRACE_PIPE_ALL_CPU) { | ||
1211 | if (ring_buffer_empty_cpu(buffer, cpu_file)) | ||
1212 | return NULL; | ||
1213 | ent = peek_next_entry(iter, cpu_file, ent_ts); | ||
1214 | if (ent_cpu) | ||
1215 | *ent_cpu = cpu_file; | ||
1216 | |||
1217 | return ent; | ||
1218 | } | ||
1219 | |||
1202 | for_each_tracing_cpu(cpu) { | 1220 | for_each_tracing_cpu(cpu) { |
1203 | 1221 | ||
1204 | if (ring_buffer_empty_cpu(buffer, cpu)) | 1222 | if (ring_buffer_empty_cpu(buffer, cpu)) |
@@ -1279,6 +1297,7 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos) | |||
1279 | static void *s_start(struct seq_file *m, loff_t *pos) | 1297 | static void *s_start(struct seq_file *m, loff_t *pos) |
1280 | { | 1298 | { |
1281 | struct trace_iterator *iter = m->private; | 1299 | struct trace_iterator *iter = m->private; |
1300 | int cpu_file = iter->cpu_file; | ||
1282 | void *p = NULL; | 1301 | void *p = NULL; |
1283 | loff_t l = 0; | 1302 | loff_t l = 0; |
1284 | int cpu; | 1303 | int cpu; |
@@ -1299,9 +1318,12 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1299 | 1318 | ||
1300 | ftrace_disable_cpu(); | 1319 | ftrace_disable_cpu(); |
1301 | 1320 | ||
1302 | for_each_tracing_cpu(cpu) { | 1321 | if (cpu_file == TRACE_PIPE_ALL_CPU) { |
1303 | ring_buffer_iter_reset(iter->buffer_iter[cpu]); | 1322 | for_each_tracing_cpu(cpu) |
1304 | } | 1323 | ring_buffer_iter_reset(iter->buffer_iter[cpu]); |
1324 | } else | ||
1325 | ring_buffer_iter_reset(iter->buffer_iter[cpu_file]); | ||
1326 | |||
1305 | 1327 | ||
1306 | ftrace_enable_cpu(); | 1328 | ftrace_enable_cpu(); |
1307 | 1329 | ||
@@ -1653,6 +1675,7 @@ static struct seq_operations tracer_seq_ops = { | |||
1653 | static struct trace_iterator * | 1675 | static struct trace_iterator * |
1654 | __tracing_open(struct inode *inode, struct file *file, int *ret) | 1676 | __tracing_open(struct inode *inode, struct file *file, int *ret) |
1655 | { | 1677 | { |
1678 | long cpu_file = (long) inode->i_private; | ||
1656 | struct trace_iterator *iter; | 1679 | struct trace_iterator *iter; |
1657 | struct seq_file *m; | 1680 | struct seq_file *m; |
1658 | int cpu; | 1681 | int cpu; |
@@ -1672,9 +1695,10 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) | |||
1672 | if (current_trace && current_trace->print_max) | 1695 | if (current_trace && current_trace->print_max) |
1673 | iter->tr = &max_tr; | 1696 | iter->tr = &max_tr; |
1674 | else | 1697 | else |
1675 | iter->tr = inode->i_private; | 1698 | iter->tr = &global_trace; |
1676 | iter->trace = current_trace; | 1699 | iter->trace = current_trace; |
1677 | iter->pos = -1; | 1700 | iter->pos = -1; |
1701 | iter->cpu_file = cpu_file; | ||
1678 | 1702 | ||
1679 | /* Notify the tracer early; before we stop tracing. */ | 1703 | /* Notify the tracer early; before we stop tracing. */ |
1680 | if (iter->trace && iter->trace->open) | 1704 | if (iter->trace && iter->trace->open) |
@@ -1684,14 +1708,22 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) | |||
1684 | if (ring_buffer_overruns(iter->tr->buffer)) | 1708 | if (ring_buffer_overruns(iter->tr->buffer)) |
1685 | iter->iter_flags |= TRACE_FILE_ANNOTATE; | 1709 | iter->iter_flags |= TRACE_FILE_ANNOTATE; |
1686 | 1710 | ||
1711 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { | ||
1712 | for_each_tracing_cpu(cpu) { | ||
1687 | 1713 | ||
1688 | for_each_tracing_cpu(cpu) { | 1714 | iter->buffer_iter[cpu] = |
1715 | ring_buffer_read_start(iter->tr->buffer, cpu); | ||
1689 | 1716 | ||
1717 | if (!iter->buffer_iter[cpu]) | ||
1718 | goto fail_buffer; | ||
1719 | } | ||
1720 | } else { | ||
1721 | cpu = iter->cpu_file; | ||
1690 | iter->buffer_iter[cpu] = | 1722 | iter->buffer_iter[cpu] = |
1691 | ring_buffer_read_start(iter->tr->buffer, cpu); | 1723 | ring_buffer_read_start(iter->tr->buffer, cpu); |
1692 | 1724 | ||
1693 | if (!iter->buffer_iter[cpu]) | 1725 | if (!iter->buffer_iter[cpu]) |
1694 | goto fail_buffer; | 1726 | goto fail; |
1695 | } | 1727 | } |
1696 | 1728 | ||
1697 | /* TODO stop tracer */ | 1729 | /* TODO stop tracer */ |
@@ -1715,6 +1747,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) | |||
1715 | if (iter->buffer_iter[cpu]) | 1747 | if (iter->buffer_iter[cpu]) |
1716 | ring_buffer_read_finish(iter->buffer_iter[cpu]); | 1748 | ring_buffer_read_finish(iter->buffer_iter[cpu]); |
1717 | } | 1749 | } |
1750 | fail: | ||
1718 | mutex_unlock(&trace_types_lock); | 1751 | mutex_unlock(&trace_types_lock); |
1719 | kfree(iter); | 1752 | kfree(iter); |
1720 | 1753 | ||
@@ -2325,54 +2358,77 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf, | |||
2325 | return cnt; | 2358 | return cnt; |
2326 | } | 2359 | } |
2327 | 2360 | ||
2328 | static atomic_t tracing_reader; | ||
2329 | |||
2330 | static int tracing_open_pipe(struct inode *inode, struct file *filp) | 2361 | static int tracing_open_pipe(struct inode *inode, struct file *filp) |
2331 | { | 2362 | { |
2363 | long cpu_file = (long) inode->i_private; | ||
2332 | struct trace_iterator *iter; | 2364 | struct trace_iterator *iter; |
2365 | int ret = 0; | ||
2333 | 2366 | ||
2334 | if (tracing_disabled) | 2367 | if (tracing_disabled) |
2335 | return -ENODEV; | 2368 | return -ENODEV; |
2336 | 2369 | ||
2337 | /* We only allow for reader of the pipe */ | 2370 | mutex_lock(&trace_types_lock); |
2338 | if (atomic_inc_return(&tracing_reader) != 1) { | 2371 | |
2339 | atomic_dec(&tracing_reader); | 2372 | /* We only allow one reader per cpu */ |
2340 | return -EBUSY; | 2373 | if (cpu_file == TRACE_PIPE_ALL_CPU) { |
2374 | if (!cpumask_empty(tracing_reader_cpumask)) { | ||
2375 | ret = -EBUSY; | ||
2376 | goto out; | ||
2377 | } | ||
2378 | cpumask_setall(tracing_reader_cpumask); | ||
2379 | } else { | ||
2380 | if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask)) | ||
2381 | cpumask_set_cpu(cpu_file, tracing_reader_cpumask); | ||
2382 | else { | ||
2383 | ret = -EBUSY; | ||
2384 | goto out; | ||
2385 | } | ||
2341 | } | 2386 | } |
2342 | 2387 | ||
2343 | /* create a buffer to store the information to pass to userspace */ | 2388 | /* create a buffer to store the information to pass to userspace */ |
2344 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 2389 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
2345 | if (!iter) | 2390 | if (!iter) { |
2346 | return -ENOMEM; | 2391 | ret = -ENOMEM; |
2392 | goto out; | ||
2393 | } | ||
2347 | 2394 | ||
2348 | if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { | 2395 | if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { |
2349 | kfree(iter); | 2396 | kfree(iter); |
2350 | return -ENOMEM; | 2397 | ret = -ENOMEM; |
2398 | goto out; | ||
2351 | } | 2399 | } |
2352 | 2400 | ||
2353 | mutex_lock(&trace_types_lock); | ||
2354 | |||
2355 | /* trace pipe does not show start of buffer */ | 2401 | /* trace pipe does not show start of buffer */ |
2356 | cpumask_setall(iter->started); | 2402 | cpumask_setall(iter->started); |
2357 | 2403 | ||
2404 | iter->cpu_file = cpu_file; | ||
2358 | iter->tr = &global_trace; | 2405 | iter->tr = &global_trace; |
2359 | iter->trace = current_trace; | 2406 | iter->trace = current_trace; |
2360 | filp->private_data = iter; | 2407 | filp->private_data = iter; |
2361 | 2408 | ||
2362 | if (iter->trace->pipe_open) | 2409 | if (iter->trace->pipe_open) |
2363 | iter->trace->pipe_open(iter); | 2410 | iter->trace->pipe_open(iter); |
2364 | mutex_unlock(&trace_types_lock); | ||
2365 | 2411 | ||
2366 | return 0; | 2412 | out: |
2413 | mutex_unlock(&trace_types_lock); | ||
2414 | return ret; | ||
2367 | } | 2415 | } |
2368 | 2416 | ||
2369 | static int tracing_release_pipe(struct inode *inode, struct file *file) | 2417 | static int tracing_release_pipe(struct inode *inode, struct file *file) |
2370 | { | 2418 | { |
2371 | struct trace_iterator *iter = file->private_data; | 2419 | struct trace_iterator *iter = file->private_data; |
2372 | 2420 | ||
2421 | mutex_lock(&trace_types_lock); | ||
2422 | |||
2423 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) | ||
2424 | cpumask_clear(tracing_reader_cpumask); | ||
2425 | else | ||
2426 | cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask); | ||
2427 | |||
2428 | mutex_unlock(&trace_types_lock); | ||
2429 | |||
2373 | free_cpumask_var(iter->started); | 2430 | free_cpumask_var(iter->started); |
2374 | kfree(iter); | 2431 | kfree(iter); |
2375 | atomic_dec(&tracing_reader); | ||
2376 | 2432 | ||
2377 | return 0; | 2433 | return 0; |
2378 | } | 2434 | } |
@@ -2911,6 +2967,59 @@ struct dentry *tracing_init_dentry(void) | |||
2911 | return d_tracer; | 2967 | return d_tracer; |
2912 | } | 2968 | } |
2913 | 2969 | ||
2970 | static struct dentry *d_percpu; | ||
2971 | |||
2972 | struct dentry *tracing_dentry_percpu(void) | ||
2973 | { | ||
2974 | static int once; | ||
2975 | struct dentry *d_tracer; | ||
2976 | |||
2977 | if (d_percpu) | ||
2978 | return d_percpu; | ||
2979 | |||
2980 | d_tracer = tracing_init_dentry(); | ||
2981 | |||
2982 | if (!d_tracer) | ||
2983 | return NULL; | ||
2984 | |||
2985 | d_percpu = debugfs_create_dir("per_cpu", d_tracer); | ||
2986 | |||
2987 | if (!d_percpu && !once) { | ||
2988 | once = 1; | ||
2989 | pr_warning("Could not create debugfs directory 'per_cpu'\n"); | ||
2990 | return NULL; | ||
2991 | } | ||
2992 | |||
2993 | return d_percpu; | ||
2994 | } | ||
2995 | |||
2996 | static void tracing_init_debugfs_percpu(long cpu) | ||
2997 | { | ||
2998 | struct dentry *d_percpu = tracing_dentry_percpu(); | ||
2999 | struct dentry *entry; | ||
3000 | /* strlen(trace_pipe) + MAX(log10(cpu)) + '\0' */ | ||
3001 | char filename[17]; | ||
3002 | |||
3003 | if (cpu > 999 || cpu < 0) | ||
3004 | return; | ||
3005 | |||
3006 | /* per cpu trace_pipe */ | ||
3007 | sprintf(filename, "trace_pipe%ld", cpu); | ||
3008 | |||
3009 | entry = debugfs_create_file(filename, 0444, d_percpu, | ||
3010 | (void *) cpu, &tracing_pipe_fops); | ||
3011 | if (!entry) | ||
3012 | pr_warning("Could not create debugfs '%s' entry\n", filename); | ||
3013 | |||
3014 | /* per cpu trace */ | ||
3015 | sprintf(filename, "trace%ld", cpu); | ||
3016 | |||
3017 | entry = debugfs_create_file(filename, 0444, d_percpu, | ||
3018 | (void *) cpu, &tracing_fops); | ||
3019 | if (!entry) | ||
3020 | pr_warning("Could not create debugfs '%s' entry\n", filename); | ||
3021 | } | ||
3022 | |||
2914 | #ifdef CONFIG_FTRACE_SELFTEST | 3023 | #ifdef CONFIG_FTRACE_SELFTEST |
2915 | /* Let selftest have access to static functions in this file */ | 3024 | /* Let selftest have access to static functions in this file */ |
2916 | #include "trace_selftest.c" | 3025 | #include "trace_selftest.c" |
@@ -2920,6 +3029,7 @@ static __init int tracer_init_debugfs(void) | |||
2920 | { | 3029 | { |
2921 | struct dentry *d_tracer; | 3030 | struct dentry *d_tracer; |
2922 | struct dentry *entry; | 3031 | struct dentry *entry; |
3032 | int cpu; | ||
2923 | 3033 | ||
2924 | d_tracer = tracing_init_dentry(); | 3034 | d_tracer = tracing_init_dentry(); |
2925 | 3035 | ||
@@ -2939,7 +3049,7 @@ static __init int tracer_init_debugfs(void) | |||
2939 | pr_warning("Could not create debugfs 'tracing_cpumask' entry\n"); | 3049 | pr_warning("Could not create debugfs 'tracing_cpumask' entry\n"); |
2940 | 3050 | ||
2941 | entry = debugfs_create_file("trace", 0444, d_tracer, | 3051 | entry = debugfs_create_file("trace", 0444, d_tracer, |
2942 | &global_trace, &tracing_fops); | 3052 | (void *) TRACE_PIPE_ALL_CPU, &tracing_fops); |
2943 | if (!entry) | 3053 | if (!entry) |
2944 | pr_warning("Could not create debugfs 'trace' entry\n"); | 3054 | pr_warning("Could not create debugfs 'trace' entry\n"); |
2945 | 3055 | ||
@@ -2970,8 +3080,8 @@ static __init int tracer_init_debugfs(void) | |||
2970 | if (!entry) | 3080 | if (!entry) |
2971 | pr_warning("Could not create debugfs 'README' entry\n"); | 3081 | pr_warning("Could not create debugfs 'README' entry\n"); |
2972 | 3082 | ||
2973 | entry = debugfs_create_file("trace_pipe", 0644, d_tracer, | 3083 | entry = debugfs_create_file("trace_pipe", 0444, d_tracer, |
2974 | NULL, &tracing_pipe_fops); | 3084 | (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops); |
2975 | if (!entry) | 3085 | if (!entry) |
2976 | pr_warning("Could not create debugfs " | 3086 | pr_warning("Could not create debugfs " |
2977 | "'trace_pipe' entry\n"); | 3087 | "'trace_pipe' entry\n"); |
@@ -2999,6 +3109,10 @@ static __init int tracer_init_debugfs(void) | |||
2999 | #ifdef CONFIG_SYSPROF_TRACER | 3109 | #ifdef CONFIG_SYSPROF_TRACER |
3000 | init_tracer_sysprof_debugfs(d_tracer); | 3110 | init_tracer_sysprof_debugfs(d_tracer); |
3001 | #endif | 3111 | #endif |
3112 | |||
3113 | for_each_tracing_cpu(cpu) | ||
3114 | tracing_init_debugfs_percpu(cpu); | ||
3115 | |||
3002 | return 0; | 3116 | return 0; |
3003 | } | 3117 | } |
3004 | 3118 | ||
@@ -3222,8 +3336,12 @@ __init static int tracer_alloc_buffers(void) | |||
3222 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) | 3336 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) |
3223 | goto out_free_buffer_mask; | 3337 | goto out_free_buffer_mask; |
3224 | 3338 | ||
3339 | if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) | ||
3340 | goto out_free_tracing_cpumask; | ||
3341 | |||
3225 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); | 3342 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); |
3226 | cpumask_copy(tracing_cpumask, cpu_all_mask); | 3343 | cpumask_copy(tracing_cpumask, cpu_all_mask); |
3344 | cpumask_clear(tracing_reader_cpumask); | ||
3227 | 3345 | ||
3228 | /* TODO: make the number of buffers hot pluggable with CPUS */ | 3346 | /* TODO: make the number of buffers hot pluggable with CPUS */ |
3229 | global_trace.buffer = ring_buffer_alloc(trace_buf_size, | 3347 | global_trace.buffer = ring_buffer_alloc(trace_buf_size, |
@@ -3272,6 +3390,8 @@ __init static int tracer_alloc_buffers(void) | |||
3272 | ret = 0; | 3390 | ret = 0; |
3273 | 3391 | ||
3274 | out_free_cpumask: | 3392 | out_free_cpumask: |
3393 | free_cpumask_var(tracing_reader_cpumask); | ||
3394 | out_free_tracing_cpumask: | ||
3275 | free_cpumask_var(tracing_cpumask); | 3395 | free_cpumask_var(tracing_cpumask); |
3276 | out_free_buffer_mask: | 3396 | out_free_buffer_mask: |
3277 | free_cpumask_var(tracing_buffer_mask); | 3397 | free_cpumask_var(tracing_buffer_mask); |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index eed732c151f..508235a39da 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -395,6 +395,8 @@ struct trace_seq { | |||
395 | unsigned int readpos; | 395 | unsigned int readpos; |
396 | }; | 396 | }; |
397 | 397 | ||
398 | #define TRACE_PIPE_ALL_CPU -1 | ||
399 | |||
398 | /* | 400 | /* |
399 | * Trace iterator - used by printout routines who present trace | 401 | * Trace iterator - used by printout routines who present trace |
400 | * results to users and which routines might sleep, etc: | 402 | * results to users and which routines might sleep, etc: |
@@ -404,6 +406,7 @@ struct trace_iterator { | |||
404 | struct tracer *trace; | 406 | struct tracer *trace; |
405 | void *private; | 407 | void *private; |
406 | struct ring_buffer_iter *buffer_iter[NR_CPUS]; | 408 | struct ring_buffer_iter *buffer_iter[NR_CPUS]; |
409 | int cpu_file; | ||
407 | 410 | ||
408 | /* The below is zeroed out in pipe_read */ | 411 | /* The below is zeroed out in pipe_read */ |
409 | struct trace_seq seq; | 412 | struct trace_seq seq; |