author    Linus Torvalds <torvalds@linux-foundation.org>  2014-12-10 23:35:41 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-12-10 23:35:41 -0500
commit    350e4f4985472e29091b899bc227d75d2a66fb4c
tree      98d17fe2198025d55511d7a306a787b76c3dde4f /kernel/trace
parent    c32809521de5b31699a33379183848b0c7628f28
parent    db0865543739b3edb2ee9bf340380cf4986b58ff
Merge tag 'trace-seq-buf-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull nmi-safe seq_buf printk update from Steven Rostedt:
 "This code is a fork from the trace-3.19 pull, as it needed the
  trace_seq clean-ups from that branch.

  This code solves the issue of performing stack dumps from NMI
  context. The issue is that printk() is not safe from NMI context:
  if the NMI were to trigger while a printk() was in progress, the
  NMI could deadlock on printk()'s internal locks. This has been seen
  in practice.

  With lots of review from Petr Mladek, this code went through several
  iterations, and we feel that it is now at a point of quality to be
  accepted into mainline. Here's what is contained in this patch set:

   - Creates a "seq_buf" generic buffer utility that allows a
     descriptor to be passed around where functions can write their
     own printk()-formatted strings into it. The generic version was
     pulled out of the trace_seq code that was made specifically for
     tracing.

   - The seq_buf code was changed to model the seq_file code. I have
     a patch (not included for 3.19) that converts the seq_file.c code
     over to use seq_buf.c the way trace_seq.c does. This was done to
     make sure that seq_buf.c is compatible with seq_file.c. I may try
     to get that patch in for 3.20.

   - The seq_buf.c file was moved to lib/ to remove its dependency on
     CONFIG_TRACING.

   - printk() was updated to allow for a per-CPU "override" of its
     internal calls. That is, instead of writing to the console, a
     call to printk() may do something else. This made it easier to
     let the NMI change what printk() does in order to call
     dump_stack() without needing to update that code as well.

   - Finally, the dump-stack-from-all-CPUs-via-NMI code was converted
     to use seq_buf. The caller that triggers the NMIs waits until all
     the NMIs have finished, and then prints the seq_buf data to the
     console safely from a non-NMI context.

  One added bonus is that this code also makes the NMI dump-stack
  work on PREEMPT_RT kernels. As printk() takes sleeping locks on
  PREEMPT_RT, printk() only writes to the console if the console does
  not use any rt_mutex-converted spin locks, which a lot do."

* tag 'trace-seq-buf-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  x86/nmi: Fix use of unallocated cpumask_var_t
  printk/percpu: Define printk_func when printk is not defined
  x86/nmi: Perform a safe NMI stack trace on all CPUs
  printk: Add per_cpu printk func to allow printk to be diverted
  seq_buf: Move the seq_buf code to lib/
  seq-buf: Make seq_buf_bprintf() conditional on CONFIG_BINARY_PRINTF
  tracing: Add seq_buf_get_buf() and seq_buf_commit() helper functions
  tracing: Have seq_buf use full buffer
  seq_buf: Add seq_buf_can_fit() helper function
  tracing: Add paranoid size check in trace_printk_seq()
  tracing: Use trace_seq_used() and seq_buf_used() instead of len
  tracing: Clean up tracing_fill_pipe_page()
  seq_buf: Create seq_buf_used() to find out how much was written
  tracing: Add a seq_buf_clear() helper and clear len and readpos in init
  tracing: Convert seq_buf fields to be like seq_file fields
  tracing: Convert seq_buf_path() to be like seq_path()
  tracing: Create seq_buf layer in trace_seq
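As a rough illustration of the seq_buf descriptor pattern described above, here is a minimal sketch against the seq_buf API this merge introduces in lib/seq_buf.c (the buffer, function name, and messages are illustrative, not taken from the commit):

	#include <linux/seq_buf.h>
	#include <linux/printk.h>

	static char example_buf[PAGE_SIZE];

	static void seq_buf_example(void)
	{
		struct seq_buf s;

		/* Wrap a caller-supplied buffer in a descriptor. */
		seq_buf_init(&s, example_buf, sizeof(example_buf));

		/* Any function handed &s can append printk()-style output. */
		seq_buf_printf(&s, "status: %d\n", 0);
		seq_buf_puts(&s, "backtrace would follow here\n");

		/* Overflow is recorded in the descriptor, not silently lost. */
		if (seq_buf_has_overflowed(&s))
			return;

		/* Later, from a safe (non-NMI) context, flush the contents. */
		pr_info("%.*s", (int)seq_buf_used(&s), example_buf);
	}

This is the property the NMI stack-trace patches rely on: each CPU's NMI handler writes into its own seq_buf, and only the waiting caller touches the console.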
Diffstat (limited to 'kernel/trace')
 -rw-r--r--  kernel/trace/trace.c                  |  65
 -rw-r--r--  kernel/trace/trace_events.c           |   9
 -rw-r--r--  kernel/trace/trace_functions_graph.c  |  11
 -rw-r--r--  kernel/trace/trace_seq.c              | 177
 4 files changed, 140 insertions(+), 122 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ce11fa50a2f0..1af4f8f2ab5d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -939,19 +939,20 @@ out:
 	return ret;
 }
 
+/* TODO add a seq_buf_to_buffer() */
 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 {
 	int len;
 
-	if (s->len <= s->readpos)
+	if (trace_seq_used(s) <= s->seq.readpos)
 		return -EBUSY;
 
-	len = s->len - s->readpos;
+	len = trace_seq_used(s) - s->seq.readpos;
 	if (cnt > len)
 		cnt = len;
-	memcpy(buf, s->buffer + s->readpos, cnt);
+	memcpy(buf, s->buffer + s->seq.readpos, cnt);
 
-	s->readpos += cnt;
+	s->seq.readpos += cnt;
 	return cnt;
 }
 
@@ -4313,6 +4314,8 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 		goto out;
 	}
 
+	trace_seq_init(&iter->seq);
+
 	/*
 	 * We make a copy of the current tracer to avoid concurrent
 	 * changes on it while we are reading.
@@ -4506,18 +4509,18 @@ waitagain:
 	trace_access_lock(iter->cpu_file);
 	while (trace_find_next_entry_inc(iter) != NULL) {
 		enum print_line_t ret;
-		int len = iter->seq.len;
+		int save_len = iter->seq.seq.len;
 
 		ret = print_trace_line(iter);
 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
 			/* don't print partial lines */
-			iter->seq.len = len;
+			iter->seq.seq.len = save_len;
 			break;
 		}
 		if (ret != TRACE_TYPE_NO_CONSUME)
 			trace_consume(iter);
 
-		if (iter->seq.len >= cnt)
+		if (trace_seq_used(&iter->seq) >= cnt)
 			break;
 
 		/*
@@ -4533,7 +4536,7 @@ waitagain:
 
 	/* Now copy what we have to the user */
 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
-	if (iter->seq.readpos >= iter->seq.len)
+	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
 		trace_seq_init(&iter->seq);
 
 	/*
@@ -4567,20 +4570,33 @@ static size_t
 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
 {
 	size_t count;
+	int save_len;
 	int ret;
 
 	/* Seq buffer is page-sized, exactly what we need. */
 	for (;;) {
-		count = iter->seq.len;
+		save_len = iter->seq.seq.len;
 		ret = print_trace_line(iter);
-		count = iter->seq.len - count;
-		if (rem < count) {
-			rem = 0;
-			iter->seq.len -= count;
+
+		if (trace_seq_has_overflowed(&iter->seq)) {
+			iter->seq.seq.len = save_len;
 			break;
 		}
+
+		/*
+		 * This should not be hit, because it should only
+		 * be set if the iter->seq overflowed. But check it
+		 * anyway to be safe.
+		 */
 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
-			iter->seq.len -= count;
+			iter->seq.seq.len = save_len;
+			break;
+		}
+
+		count = trace_seq_used(&iter->seq) - save_len;
+		if (rem < count) {
+			rem = 0;
+			iter->seq.seq.len = save_len;
 			break;
 		}
 
@@ -4661,13 +4677,13 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		/* Copy the data into the page, so we can start over. */
 		ret = trace_seq_to_buffer(&iter->seq,
 					  page_address(spd.pages[i]),
-					  iter->seq.len);
+					  trace_seq_used(&iter->seq));
 		if (ret < 0) {
 			__free_page(spd.pages[i]);
 			break;
 		}
 		spd.partial[i].offset = 0;
-		spd.partial[i].len = iter->seq.len;
+		spd.partial[i].len = trace_seq_used(&iter->seq);
 
 		trace_seq_init(&iter->seq);
 	}
@@ -5667,7 +5683,8 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
 	trace_seq_printf(s, "read events: %ld\n", cnt);
 
-	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
+	count = simple_read_from_buffer(ubuf, count, ppos,
+					s->buffer, trace_seq_used(s));
 
 	kfree(s);
 
@@ -6630,11 +6647,19 @@ void
 trace_printk_seq(struct trace_seq *s)
 {
 	/* Probably should print a warning here. */
-	if (s->len >= TRACE_MAX_PRINT)
-		s->len = TRACE_MAX_PRINT;
+	if (s->seq.len >= TRACE_MAX_PRINT)
+		s->seq.len = TRACE_MAX_PRINT;
+
+	/*
+	 * More paranoid code. Although the buffer size is set to
+	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
+	 * an extra layer of protection.
+	 */
+	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
+		s->seq.len = s->seq.size - 1;
 
 	/* should be zero ended, but we are paranoid. */
-	s->buffer[s->len] = 0;
+	s->buffer[s->seq.len] = 0;
 
 	printk(KERN_TRACE "%s", s->buffer);
 
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 139716bcef7a..d0e4f92b5eb6 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1044,7 +1044,8 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 	mutex_unlock(&event_mutex);
 
 	if (file)
-		r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+		r = simple_read_from_buffer(ubuf, cnt, ppos,
+					    s->buffer, trace_seq_used(s));
 
 	kfree(s);
 
@@ -1210,7 +1211,8 @@ subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 	trace_seq_init(s);
 
 	print_subsystem_event_filter(system, s);
-	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+	r = simple_read_from_buffer(ubuf, cnt, ppos,
+				    s->buffer, trace_seq_used(s));
 
 	kfree(s);
 
@@ -1265,7 +1267,8 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 	trace_seq_init(s);
 
 	func(s);
-	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+	r = simple_read_from_buffer(ubuf, cnt, ppos,
+				    s->buffer, trace_seq_used(s));
 
 	kfree(s);
 
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 6c2ab955018c..ba476009e5de 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1136,14 +1136,17 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
 		return ret;
 	}
 
+	if (trace_seq_has_overflowed(s))
+		goto out;
+
 	/* Strip ending newline */
-	if (s->buffer[s->len - 1] == '\n') {
-		s->buffer[s->len - 1] = '\0';
-		s->len--;
+	if (s->buffer[s->seq.len - 1] == '\n') {
+		s->buffer[s->seq.len - 1] = '\0';
+		s->seq.len--;
 	}
 
 	trace_seq_puts(s, " */\n");
-
+ out:
 	return trace_handle_return(s);
 }
 
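The guard added above matters because, after the seq_buf conversion, seq.len may point at or past the end of the usable buffer once an overflow occurs, so indexing s->buffer[s->seq.len - 1] is only safe on the non-overflow path. A minimal sketch of the guarded newline-stripping pattern (the helper name is illustrative):

	static void strip_trailing_newline(struct trace_seq *s)
	{
		/* Never index the buffer through len after an overflow. */
		if (trace_seq_has_overflowed(s))
			return;

		if (s->seq.len && s->buffer[s->seq.len - 1] == '\n') {
			s->buffer[s->seq.len - 1] = '\0';
			s->seq.len--;
		}
	}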
diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
index fabfa0f190a3..f8b45d8792f9 100644
--- a/kernel/trace/trace_seq.c
+++ b/kernel/trace/trace_seq.c
@@ -27,10 +27,19 @@
 #include <linux/trace_seq.h>
 
 /* How much buffer is left on the trace_seq? */
-#define TRACE_SEQ_BUF_LEFT(s) ((PAGE_SIZE - 1) - (s)->len)
+#define TRACE_SEQ_BUF_LEFT(s) seq_buf_buffer_left(&(s)->seq)
 
 /* How much buffer is written? */
-#define TRACE_SEQ_BUF_USED(s) min((s)->len, (unsigned int)(PAGE_SIZE - 1))
+#define TRACE_SEQ_BUF_USED(s) seq_buf_used(&(s)->seq)
+
+/*
+ * trace_seq should work with being initialized with 0s.
+ */
+static inline void __trace_seq_init(struct trace_seq *s)
+{
+	if (unlikely(!s->seq.size))
+		trace_seq_init(s);
+}
 
 /**
  * trace_print_seq - move the contents of trace_seq into a seq_file
@@ -43,10 +52,11 @@
  */
 int trace_print_seq(struct seq_file *m, struct trace_seq *s)
 {
-	unsigned int len = TRACE_SEQ_BUF_USED(s);
 	int ret;
 
-	ret = seq_write(m, s->buffer, len);
+	__trace_seq_init(s);
+
+	ret = seq_buf_print_seq(m, &s->seq);
 
 	/*
 	 * Only reset this buffer if we successfully wrote to the
@@ -72,24 +82,23 @@ int trace_print_seq(struct seq_file *m, struct trace_seq *s)
  */
 void trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
 {
-	unsigned int len = TRACE_SEQ_BUF_LEFT(s);
+	unsigned int save_len = s->seq.len;
 	va_list ap;
-	int ret;
 
-	if (s->full || !len)
+	if (s->full)
 		return;
 
+	__trace_seq_init(s);
+
 	va_start(ap, fmt);
-	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
+	seq_buf_vprintf(&s->seq, fmt, ap);
 	va_end(ap);
 
 	/* If we can't write it all, don't bother writing anything */
-	if (ret >= len) {
+	if (unlikely(seq_buf_has_overflowed(&s->seq))) {
+		s->seq.len = save_len;
 		s->full = 1;
-		return;
 	}
-
-	s->len += ret;
 }
 EXPORT_SYMBOL_GPL(trace_seq_printf);
 
@@ -104,14 +113,19 @@ EXPORT_SYMBOL_GPL(trace_seq_printf);
 void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
 		       int nmaskbits)
 {
-	unsigned int len = TRACE_SEQ_BUF_LEFT(s);
-	int ret;
+	unsigned int save_len = s->seq.len;
 
-	if (s->full || !len)
+	if (s->full)
 		return;
 
-	ret = bitmap_scnprintf(s->buffer + s->len, len, maskp, nmaskbits);
-	s->len += ret;
+	__trace_seq_init(s);
+
+	seq_buf_bitmask(&s->seq, maskp, nmaskbits);
+
+	if (unlikely(seq_buf_has_overflowed(&s->seq))) {
+		s->seq.len = save_len;
+		s->full = 1;
+	}
 }
 EXPORT_SYMBOL_GPL(trace_seq_bitmask);
 
@@ -128,21 +142,20 @@ EXPORT_SYMBOL_GPL(trace_seq_bitmask);
  */
 void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
 {
-	unsigned int len = TRACE_SEQ_BUF_LEFT(s);
+	unsigned int save_len = s->seq.len;
-	int ret;
 
-	if (s->full || !len)
+	if (s->full)
 		return;
 
-	ret = vsnprintf(s->buffer + s->len, len, fmt, args);
+	__trace_seq_init(s);
+
+	seq_buf_vprintf(&s->seq, fmt, args);
 
 	/* If we can't write it all, don't bother writing anything */
-	if (ret >= len) {
+	if (unlikely(seq_buf_has_overflowed(&s->seq))) {
+		s->seq.len = save_len;
 		s->full = 1;
-		return;
 	}
-
-	s->len += ret;
 }
 EXPORT_SYMBOL_GPL(trace_seq_vprintf);
 
@@ -163,21 +176,21 @@ EXPORT_SYMBOL_GPL(trace_seq_vprintf);
  */
 void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
 {
-	unsigned int len = TRACE_SEQ_BUF_LEFT(s);
+	unsigned int save_len = s->seq.len;
-	int ret;
 
-	if (s->full || !len)
+	if (s->full)
 		return;
 
-	ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
+	__trace_seq_init(s);
+
+	seq_buf_bprintf(&s->seq, fmt, binary);
 
 	/* If we can't write it all, don't bother writing anything */
-	if (ret >= len) {
+	if (unlikely(seq_buf_has_overflowed(&s->seq))) {
+		s->seq.len = save_len;
 		s->full = 1;
 		return;
 	}
-
-	s->len += ret;
 }
 EXPORT_SYMBOL_GPL(trace_seq_bprintf);
 
@@ -198,13 +211,14 @@ void trace_seq_puts(struct trace_seq *s, const char *str)
 	if (s->full)
 		return;
 
+	__trace_seq_init(s);
+
 	if (len > TRACE_SEQ_BUF_LEFT(s)) {
 		s->full = 1;
 		return;
 	}
 
-	memcpy(s->buffer + s->len, str, len);
-	s->len += len;
+	seq_buf_putmem(&s->seq, str, len);
 }
 EXPORT_SYMBOL_GPL(trace_seq_puts);
 
@@ -223,12 +237,14 @@ void trace_seq_putc(struct trace_seq *s, unsigned char c)
 	if (s->full)
 		return;
 
+	__trace_seq_init(s);
+
 	if (TRACE_SEQ_BUF_LEFT(s) < 1) {
 		s->full = 1;
 		return;
 	}
 
-	s->buffer[s->len++] = c;
+	seq_buf_putc(&s->seq, c);
 }
 EXPORT_SYMBOL_GPL(trace_seq_putc);
 
@@ -247,19 +263,17 @@ void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len)
 	if (s->full)
 		return;
 
+	__trace_seq_init(s);
+
 	if (len > TRACE_SEQ_BUF_LEFT(s)) {
 		s->full = 1;
 		return;
 	}
 
-	memcpy(s->buffer + s->len, mem, len);
-	s->len += len;
+	seq_buf_putmem(&s->seq, mem, len);
 }
 EXPORT_SYMBOL_GPL(trace_seq_putmem);
 
-#define MAX_MEMHEX_BYTES	8U
-#define HEX_CHARS		(MAX_MEMHEX_BYTES*2 + 1)
-
 /**
  * trace_seq_putmem_hex - write raw memory into the buffer in ASCII hex
  * @s: trace sequence descriptor
@@ -273,32 +287,26 @@ EXPORT_SYMBOL_GPL(trace_seq_putmem);
 void trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
 			  unsigned int len)
 {
-	unsigned char hex[HEX_CHARS];
-	const unsigned char *data = mem;
-	unsigned int start_len;
-	int i, j;
+	unsigned int save_len = s->seq.len;
 
 	if (s->full)
 		return;
 
-	while (len) {
-		start_len = min(len, HEX_CHARS - 1);
-#ifdef __BIG_ENDIAN
-		for (i = 0, j = 0; i < start_len; i++) {
-#else
-		for (i = start_len-1, j = 0; i >= 0; i--) {
-#endif
-			hex[j++] = hex_asc_hi(data[i]);
-			hex[j++] = hex_asc_lo(data[i]);
-		}
-		if (WARN_ON_ONCE(j == 0 || j/2 > len))
-			break;
-
-		/* j increments twice per loop */
-		len -= j / 2;
-		hex[j++] = ' ';
-
-		trace_seq_putmem(s, hex, j);
+	__trace_seq_init(s);
+
+	/* Each byte is represented by two chars */
+	if (len * 2 > TRACE_SEQ_BUF_LEFT(s)) {
+		s->full = 1;
+		return;
+	}
+
+	/* The added spaces can still cause an overflow */
+	seq_buf_putmem_hex(&s->seq, mem, len);
+
+	if (unlikely(seq_buf_has_overflowed(&s->seq))) {
+		s->seq.len = save_len;
+		s->full = 1;
+		return;
 	}
 }
 EXPORT_SYMBOL_GPL(trace_seq_putmem_hex);
@@ -317,30 +325,27 @@ EXPORT_SYMBOL_GPL(trace_seq_putmem_hex);
  */
 int trace_seq_path(struct trace_seq *s, const struct path *path)
 {
-	unsigned char *p;
+	unsigned int save_len = s->seq.len;
 
 	if (s->full)
 		return 0;
 
+	__trace_seq_init(s);
+
 	if (TRACE_SEQ_BUF_LEFT(s) < 1) {
 		s->full = 1;
 		return 0;
 	}
 
-	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
-	if (!IS_ERR(p)) {
-		p = mangle_path(s->buffer + s->len, p, "\n");
-		if (p) {
-			s->len = p - s->buffer;
-			return 1;
-		}
-	} else {
-		s->buffer[s->len++] = '?';
-		return 1;
-	}
+	seq_buf_path(&s->seq, path, "\n");
+
+	if (unlikely(seq_buf_has_overflowed(&s->seq))) {
+		s->seq.len = save_len;
+		s->full = 1;
+		return 0;
+	}
 
-	s->full = 1;
-	return 0;
+	return 1;
 }
 EXPORT_SYMBOL_GPL(trace_seq_path);
 
@@ -366,25 +371,7 @@ EXPORT_SYMBOL_GPL(trace_seq_path);
  */
 int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt)
 {
-	int len;
-	int ret;
-
-	if (!cnt)
-		return 0;
-
-	if (s->len <= s->readpos)
-		return -EBUSY;
-
-	len = s->len - s->readpos;
-	if (cnt > len)
-		cnt = len;
-	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
-	if (ret == cnt)
-		return -EFAULT;
-
-	cnt -= ret;
-
-	s->readpos += cnt;
-	return cnt;
+	__trace_seq_init(s);
+	return seq_buf_to_user(&s->seq, ubuf, cnt);
 }
 EXPORT_SYMBOL_GPL(trace_seq_to_user);
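Taken together, trace_seq is now a thin wrapper: each entry point checks s->full, lazily initializes via __trace_seq_init(), delegates to the seq_buf layer, and converts a seq_buf overflow into the sticky s->full flag after restoring the saved length. The lazy initialization is what makes a simply zeroed trace_seq usable with no explicit setup, as in this hedged sketch (the static declaration and string are illustrative, not from the commit):

	static struct trace_seq s;	/* zeroed BSS: s.seq.size == 0 */

	static void lazy_init_example(void)
	{
		/*
		 * The first write sees s.seq.size == 0 and runs
		 * trace_seq_init() internally via __trace_seq_init().
		 */
		trace_seq_puts(&s, "no explicit init required\n");
	}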