author    Arnaldo Carvalho de Melo <acme@redhat.com>    2016-10-03 10:07:24 -0400
committer Arnaldo Carvalho de Melo <acme@redhat.com>    2016-10-03 10:24:13 -0400
commit    18ef15c675a5d5d97f844ebcf340a2a6c7cf3142 (patch)
tree      08616a05632dbbea3281e040069f5cbdca9878d8
parent    ead1a57457c0324a167f3f9e3a70e26c2d75fb12 (diff)
perf tools: Experiment with cppcheck
Experimenting a bit using cppcheck[1], a static checker brought to my
attention by Colin, reducing the scope of some variables and reducing
the number of source code lines in the process:

  $ cppcheck --enable=style tools/perf/util/thread.c
  Checking tools/perf/util/thread.c...
  [tools/perf/util/thread.c:17]: (style) The scope of the variable 'leader' can be reduced.
  [tools/perf/util/thread.c:133]: (style) The scope of the variable 'err' can be reduced.
  [tools/perf/util/thread.c:273]: (style) The scope of the variable 'err' can be reduced.

Will continue later, but these are already useful, so keep them.

1: https://sourceforge.net/p/cppcheck/wiki/Home/

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Colin Ian King <colin.king@canonical.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/n/tip-ixws7lbycihhpmq9cc949ti6@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
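The change applied throughout the patch below is the one cppcheck suggests: move each variable declaration into the innermost block that actually uses it. A minimal, hypothetical before/after sketch of that pattern (do_enable() is a made-up helper for illustration, not a perf function):

/* Illustrative sketch only: do_enable() is a hypothetical helper used to
 * show the shape of the change, not part of the perf tools code base. */
static int do_enable(int i)
{
	return i < 0 ? -1 : 0;
}

/* Before: 'err' is declared at function scope even though only the loop
 * body uses it; this is what cppcheck's style check flags. */
static int enable_all_wide_scope(int nr)
{
	int i, err;

	for (i = 0; i < nr; i++) {
		err = do_enable(i);
		if (err)
			return err;
	}
	return 0;
}

/* After: 'err' lives only inside the loop body that needs it, which is
 * the transformation applied in the hunks below. */
static int enable_all_narrow_scope(int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		int err = do_enable(i);

		if (err)
			return err;
	}
	return 0;
}

Nothing changes functionally; the narrower scope just makes the variable's lifetime obvious to readers and to the checker.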
-rw-r--r--   tools/perf/util/evlist.c    12
-rw-r--r--   tools/perf/util/evsel.c      3
-rw-r--r--   tools/perf/util/machine.c    6
-rw-r--r--   tools/perf/util/strbuf.h     3
-rw-r--r--   tools/perf/util/thread.c     9
5 files changed, 13 insertions, 20 deletions
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index ea34c5a32c11..d92e02006fb8 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -384,15 +384,14 @@ void perf_evlist__toggle_enable(struct perf_evlist *evlist)
 static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
 					  struct perf_evsel *evsel, int cpu)
 {
-	int thread, err;
+	int thread;
 	int nr_threads = perf_evlist__nr_threads(evlist, evsel);
 
 	if (!evsel->fd)
 		return -EINVAL;
 
 	for (thread = 0; thread < nr_threads; thread++) {
-		err = ioctl(FD(evsel, cpu, thread),
-			    PERF_EVENT_IOC_ENABLE, 0);
+		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
 		if (err)
 			return err;
 	}
@@ -403,14 +402,14 @@ static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
 					    struct perf_evsel *evsel,
 					    int thread)
 {
-	int cpu, err;
+	int cpu;
 	int nr_cpus = cpu_map__nr(evlist->cpus);
 
 	if (!evsel->fd)
 		return -EINVAL;
 
 	for (cpu = 0; cpu < nr_cpus; cpu++) {
-		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
+		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
 		if (err)
 			return err;
 	}
@@ -1606,10 +1605,9 @@ void perf_evlist__close(struct perf_evlist *evlist)
 	struct perf_evsel *evsel;
 	int ncpus = cpu_map__nr(evlist->cpus);
 	int nthreads = thread_map__nr(evlist->threads);
-	int n;
 
 	evlist__for_each_entry_reverse(evlist, evsel) {
-		n = evsel->cpus ? evsel->cpus->nr : ncpus;
+		int n = evsel->cpus ? evsel->cpus->nr : ncpus;
 		perf_evsel__close(evsel, n, nthreads);
 	}
 }
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 380e84c3af3d..8bc271141d9d 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -985,14 +985,13 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
 
 static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
-	int cpu, thread;
-
 	if (evsel->system_wide)
 		nthreads = 1;
 
 	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
 
 	if (evsel->fd) {
+		int cpu, thread;
 		for (cpu = 0; cpu < ncpus; cpu++) {
 			for (thread = 0; thread < nthreads; thread++) {
 				FD(evsel, cpu, thread) = -1;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 18e4519abef2..df85b9efd80f 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1745,9 +1745,8 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
 				       int max_stack)
 {
 	struct ip_callchain *chain = sample->callchain;
-	int chain_nr = min(max_stack, (int)chain->nr);
+	int chain_nr = min(max_stack, (int)chain->nr), i;
 	u8 cpumode = PERF_RECORD_MISC_USER;
-	int i, j, err;
 	u64 ip;
 
 	for (i = 0; i < chain_nr; i++) {
@@ -1758,7 +1757,7 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
 	/* LBR only affects the user callchain */
 	if (i != chain_nr) {
 		struct branch_stack *lbr_stack = sample->branch_stack;
-		int lbr_nr = lbr_stack->nr;
+		int lbr_nr = lbr_stack->nr, j;
 		/*
 		 * LBR callstack can only get user call chain.
 		 * The mix_chain_nr is kernel call chain
@@ -1772,6 +1771,7 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
 		int mix_chain_nr = i + 1 + lbr_nr + 1;
 
 		for (j = 0; j < mix_chain_nr; j++) {
+			int err;
 			if (callchain_param.order == ORDER_CALLEE) {
 				if (j < i + 1)
 					ip = chain->ips[j];
diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h
index b268a6648a5d..318424ea561d 100644
--- a/tools/perf/util/strbuf.h
+++ b/tools/perf/util/strbuf.h
@@ -66,9 +66,8 @@ static inline ssize_t strbuf_avail(const struct strbuf *sb) {
 int strbuf_grow(struct strbuf *buf, size_t);
 
 static inline int strbuf_setlen(struct strbuf *sb, size_t len) {
-	int ret;
 	if (!sb->alloc) {
-		ret = strbuf_grow(sb, 0);
+		int ret = strbuf_grow(sb, 0);
 		if (ret)
 			return ret;
 	}
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 8b10a55410a2..f5af87f66663 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -14,13 +14,12 @@
 
 int thread__init_map_groups(struct thread *thread, struct machine *machine)
 {
-	struct thread *leader;
 	pid_t pid = thread->pid_;
 
 	if (pid == thread->tid || pid == -1) {
 		thread->mg = map_groups__new(machine);
 	} else {
-		leader = __machine__findnew_thread(machine, pid, pid);
+		struct thread *leader = __machine__findnew_thread(machine, pid, pid);
 		if (leader) {
 			thread->mg = map_groups__get(leader->mg);
 			thread__put(leader);
@@ -130,11 +129,10 @@ int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
 		       bool exec)
 {
 	struct comm *new, *curr = thread__comm(thread);
-	int err;
 
 	/* Override the default :tid entry */
 	if (!thread->comm_set) {
-		err = comm__override(curr, str, timestamp, exec);
+		int err = comm__override(curr, str, timestamp, exec);
 		if (err)
 			return err;
 	} else {
@@ -270,10 +268,9 @@ static int thread__clone_map_groups(struct thread *thread,
 
 int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
 {
-	int err;
-
 	if (parent->comm_set) {
 		const char *comm = thread__comm_str(parent);
+		int err;
 		if (!comm)
 			return -ENOMEM;
 		err = thread__set_comm(thread, comm, timestamp);