path: root/kernel
author     Linus Torvalds <torvalds@linux-foundation.org>  2019-03-15 17:47:02 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-03-15 17:47:02 -0400
commit     aa2e3ac64ace127f403be85aa4d6015b859385f2 (patch)
tree       d2b7fea0f3bff7916496c5204bd601bb96ea44fd /kernel
parent     323ea40ff6fb1e9d2c481bff50245ee5f559c8af (diff)
parent     a039480e9e93896cadc5a91468964febb3c5d488 (diff)
Merge tag 'trace-v5.1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing fixes and cleanups from Steven Rostedt:
 "This contains a series of last minute clean ups, small fixes and error checks"

* tag 'trace-v5.1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing/probe: Verify alloc_trace_*probe() result
  tracing/probe: Check event/group naming rule at parsing
  tracing/probe: Check the size of argument name and body
  tracing/probe: Check event name length correctly
  tracing/probe: Check maxactive error cases
  tracing: kdb: Fix ftdump to not sleep
  trace/probes: Remove kernel doc style from non kernel doc comment
  tracing/probes: Make reserved_field_names static
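For context, the probe definitions whose parsing these fixes tighten are the text commands written to the kprobe_events file in tracefs. Below is a minimal userspace sketch of that interface, for illustration only; the mount point, the group/event names ("mygrp"/"myread") and the probed symbol (vfs_read) are assumptions, not part of the patch:

#include <stdio.h>

int main(void)
{
        /*
         * A kretprobe definition: 'r' plus maxactive (20), group "mygrp",
         * event "myread", probed symbol vfs_read, fetching the return value.
         * The checks added in this series reject maxactive on plain 'p'
         * probes, a maxactive of 0, over-long event/argument names, and
         * event/group names that are not valid C identifiers.
         */
        const char *def = "r20:mygrp/myread vfs_read $retval";
        FILE *f = fopen("/sys/kernel/debug/tracing/kprobe_events", "a");

        if (!f)
                return 1;
        fprintf(f, "%s\n", def);
        return fclose(f) ? 1 : 0;
}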
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/ring_buffer.c    5
-rw-r--r--  kernel/trace/trace.c          6
-rw-r--r--  kernel/trace/trace_kdb.c      6
-rw-r--r--  kernel/trace/trace_kprobe.c  23
-rw-r--r--  kernel/trace/trace_probe.c   20
-rw-r--r--  kernel/trace/trace_probe.h    1
-rw-r--r--  kernel/trace/trace_uprobe.c   8
7 files changed, 44 insertions(+), 25 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 9a91479bbbfe..41b6f96e5366 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4191,6 +4191,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
  * @buffer: The ring buffer to read from
  * @cpu: The cpu buffer to iterate over
+ * @flags: gfp flags to use for memory allocation
  *
  * This performs the initial preparations necessary to iterate
  * through the buffer. Memory is allocated, buffer recording
@@ -4208,7 +4209,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
  * This overall must be paired with ring_buffer_read_finish.
  */
 struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
 {
         struct ring_buffer_per_cpu *cpu_buffer;
         struct ring_buffer_iter *iter;
@@ -4216,7 +4217,7 @@ ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
         if (!cpumask_test_cpu(cpu, buffer->cpumask))
                 return NULL;
 
-        iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+        iter = kmalloc(sizeof(*iter), flags);
         if (!iter)
                 return NULL;
 
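The new gfp_t parameter lets each caller pick allocation flags that match its context: the tracing file open path sleeps freely and keeps GFP_KERNEL, while kdb's ftdump cannot sleep and now passes GFP_ATOMIC (see the trace.c and trace_kdb.c hunks below). A hedged sketch of the caller pattern follows; the prepare_iter() helper is hypothetical and not part of the patch:

#include <linux/ring_buffer.h>
#include <linux/gfp.h>

/* Hypothetical helper: choose gfp flags that match the calling context. */
static struct ring_buffer_iter *prepare_iter(struct ring_buffer *buffer,
                                             int cpu, bool may_sleep)
{
        /*
         * GFP_KERNEL may sleep to satisfy the allocation; GFP_ATOMIC may not,
         * so it can fail more readily and callers must still handle NULL.
         */
        return ring_buffer_read_prepare(buffer, cpu,
                                        may_sleep ? GFP_KERNEL : GFP_ATOMIC);
}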
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 94ffc1c559d5..21153e64bf1c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4079,7 +4079,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
         if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
                 for_each_tracing_cpu(cpu) {
                         iter->buffer_iter[cpu] =
-                                ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+                                ring_buffer_read_prepare(iter->trace_buffer->buffer,
+                                                         cpu, GFP_KERNEL);
                 }
                 ring_buffer_read_prepare_sync();
                 for_each_tracing_cpu(cpu) {
@@ -4089,7 +4090,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
         } else {
                 cpu = iter->cpu_file;
                 iter->buffer_iter[cpu] =
-                        ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+                        ring_buffer_read_prepare(iter->trace_buffer->buffer,
+                                                 cpu, GFP_KERNEL);
                 ring_buffer_read_prepare_sync();
                 ring_buffer_read_start(iter->buffer_iter[cpu]);
                 tracing_iter_reset(iter, cpu);
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index d953c163a079..810d78a8d14c 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -51,14 +51,16 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
         if (cpu_file == RING_BUFFER_ALL_CPUS) {
                 for_each_tracing_cpu(cpu) {
                         iter.buffer_iter[cpu] =
-                        ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu);
+                        ring_buffer_read_prepare(iter.trace_buffer->buffer,
+                                                 cpu, GFP_ATOMIC);
                         ring_buffer_read_start(iter.buffer_iter[cpu]);
                         tracing_iter_reset(&iter, cpu);
                 }
         } else {
                 iter.cpu_file = cpu_file;
                 iter.buffer_iter[cpu_file] =
-                        ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu_file);
+                        ring_buffer_read_prepare(iter.trace_buffer->buffer,
+                                                 cpu_file, GFP_ATOMIC);
                 ring_buffer_read_start(iter.buffer_iter[cpu_file]);
                 tracing_iter_reset(&iter, cpu_file);
         }
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 99592c27465e..5d5129b05df7 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -35,7 +35,7 @@ static struct dyn_event_operations trace_kprobe_ops = {
         .match = trace_kprobe_match,
 };
 
-/**
+/*
  * Kprobe event core functions
  */
 struct trace_kprobe {
@@ -221,7 +221,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
 
         tk->rp.maxactive = maxactive;
 
-        if (!event || !is_good_name(event)) {
+        if (!event || !group) {
                 ret = -EINVAL;
                 goto error;
         }
@@ -231,11 +231,6 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
         if (!tk->tp.call.name)
                 goto error;
 
-        if (!group || !is_good_name(group)) {
-                ret = -EINVAL;
-                goto error;
-        }
-
         tk->tp.class.system = kstrdup(group, GFP_KERNEL);
         if (!tk->tp.class.system)
                 goto error;
@@ -624,7 +619,11 @@ static int trace_kprobe_create(int argc, const char *argv[])
         if (event)
                 event++;
 
-        if (is_return && isdigit(argv[0][1])) {
+        if (isdigit(argv[0][1])) {
+                if (!is_return) {
+                        pr_info("Maxactive is not for kprobe");
+                        return -EINVAL;
+                }
                 if (event)
                         len = event - &argv[0][1] - 1;
                 else
@@ -634,8 +633,8 @@ static int trace_kprobe_create(int argc, const char *argv[])
                 memcpy(buf, &argv[0][1], len);
                 buf[len] = '\0';
                 ret = kstrtouint(buf, 0, &maxactive);
-                if (ret) {
-                        pr_info("Failed to parse maxactive.\n");
+                if (ret || !maxactive) {
+                        pr_info("Invalid maxactive number\n");
                         return ret;
                 }
                 /* kretprobes instances are iterated over via a list. The
@@ -694,9 +693,9 @@ static int trace_kprobe_create(int argc, const char *argv[])
         tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
                                 argc, is_return);
         if (IS_ERR(tk)) {
-                pr_info("Failed to allocate trace_probe.(%d)\n",
-                        (int)PTR_ERR(tk));
                 ret = PTR_ERR(tk);
+                /* This must return -ENOMEM otherwise there is a bug */
+                WARN_ON_ONCE(ret != -ENOMEM);
                 goto out;
         }
 
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 89da34b326e3..8f8411e7835f 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -13,7 +13,7 @@
 
 #include "trace_probe.h"
 
-const char *reserved_field_names[] = {
+static const char *reserved_field_names[] = {
         "common_type",
         "common_flags",
         "common_preempt_count",
@@ -159,6 +159,7 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
                                 char *buf)
 {
         const char *slash, *event = *pevent;
+        int len;
 
         slash = strchr(event, '/');
         if (slash) {
@@ -171,12 +172,25 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
                         return -E2BIG;
                 }
                 strlcpy(buf, event, slash - event + 1);
+                if (!is_good_name(buf)) {
+                        pr_info("Group name must follow the same rules as C identifiers\n");
+                        return -EINVAL;
+                }
                 *pgroup = buf;
                 *pevent = slash + 1;
+                event = *pevent;
         }
-        if (strlen(event) == 0) {
+        len = strlen(event);
+        if (len == 0) {
                 pr_info("Event name is not specified\n");
                 return -EINVAL;
+        } else if (len > MAX_EVENT_NAME_LEN) {
+                pr_info("Event name is too long\n");
+                return -E2BIG;
+        }
+        if (!is_good_name(event)) {
+                pr_info("Event name must follow the same rules as C identifiers\n");
+                return -EINVAL;
         }
         return 0;
 }
@@ -548,6 +562,8 @@ int traceprobe_parse_probe_arg(struct trace_probe *tp, int i, char *arg,
 
         body = strchr(arg, '=');
         if (body) {
+                if (body - arg > MAX_ARG_NAME_LEN || body == arg)
+                        return -EINVAL;
                 parg->name = kmemdup_nul(arg, body - arg, GFP_KERNEL);
                 body++;
         } else {
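The naming rule enforced above is the one is_good_name() implements: group and event names must look like C identifiers. A rough standalone re-implementation, for illustration only; the helper name and this simplified form are assumptions, not the kernel's actual inline:

#include <ctype.h>
#include <stdbool.h>

/* Roughly what is_good_name() checks: the first character must be a letter
 * or '_', and every following character a letter, digit or '_'. */
static bool name_is_c_identifier(const char *name)
{
        if (!isalpha((unsigned char)*name) && *name != '_')
                return false;
        while (*++name != '\0') {
                if (!isalnum((unsigned char)*name) && *name != '_')
                        return false;
        }
        return true;
}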
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 8a63f8bc01bc..2177c206de15 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -32,6 +32,7 @@
 #define MAX_TRACE_ARGS          128
 #define MAX_ARGSTR_LEN          63
 #define MAX_ARRAY_LEN           64
+#define MAX_ARG_NAME_LEN        32
 #define MAX_STRING_SIZE         PATH_MAX
 
 /* Reserved field names */
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 9bde07c06362..be78d99ee6bc 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -273,10 +273,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 {
         struct trace_uprobe *tu;
 
-        if (!event || !is_good_name(event))
-                return ERR_PTR(-EINVAL);
-
-        if (!group || !is_good_name(group))
+        if (!event || !group)
                 return ERR_PTR(-EINVAL);
 
         tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
@@ -524,8 +521,9 @@ static int trace_uprobe_create(int argc, const char **argv)
 
         tu = alloc_trace_uprobe(group, event, argc, is_return);
         if (IS_ERR(tu)) {
-                pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
                 ret = PTR_ERR(tu);
+                /* This must return -ENOMEM otherwise there is a bug */
+                WARN_ON_ONCE(ret != -ENOMEM);
                 goto fail_address_parse;
         }
         tu->offset = offset;