author	Arnaldo Carvalho de Melo <acme@redhat.com>	2012-09-08 21:53:06 -0400
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2012-09-09 10:39:02 -0400
commit	2814eb05720baa54ffe0950714fd55a5bcc8a565 (patch)
tree	f49ae1044379205e053bec2e43bcfd36d0d55e6e	/tools/perf/builtin-kmem.c
parent	a116e05dcf61c8d758e0f0aed40325534aee2c13 (diff)
perf kmem: Remove die() calls
Use pr_err() + return -1 where the event handlers previously called die(), have perf_session__process_events() abort when a handler fails, and let perf's main() do whatever cleanup it needs on exit.

Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-i7rhuqfwshjiwc9gr9m1vov4@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
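For readers skimming the diff below, the whole patch is one mechanical conversion: every helper that used to call die() on failure now returns int, reports the problem with pr_err() and returns -1, and its callers check and propagate that result. The stand-alone sketch that follows only illustrates that pattern; it is not code from the patch, its global names are made up for the example, and it substitutes a local fprintf()-based macro for perf's real pr_err() in util/debug.h.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for perf's pr_err(); the real helper lives in util/debug.h. */
#define pr_err(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)

static int *cpunode_map;	/* illustrative globals, not the perf ones */
static int max_cpu_num = 16;

/*
 * Before this kind of patch the helper would have ended with:
 *	if (!cpunode_map)
 *		die("calloc");
 * After it, the helper reports the error and lets the caller decide.
 */
static int init_map(void)
{
	cpunode_map = calloc(max_cpu_num, sizeof(int));
	if (!cpunode_map) {
		pr_err("%s: calloc failed\n", __func__);
		return -1;
	}
	return 0;
}

int main(void)
{
	/* Callers now check the return value instead of relying on die(). */
	if (init_map())
		return EXIT_FAILURE;

	free(cpunode_map);
	return EXIT_SUCCESS;
}

The same shape repeats in the hunks below for the calloc(), malloc() and strdup() failure paths and for the event handlers called from process_sample_event().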
Diffstat (limited to 'tools/perf/builtin-kmem.c')
-rw-r--r--	tools/perf/builtin-kmem.c	108
1 file changed, 67 insertions, 41 deletions
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index fc6607b383f2..ad9f52097388 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -58,41 +58,52 @@ static unsigned long nr_allocs, nr_cross_allocs;
 
 #define PATH_SYS_NODE	"/sys/devices/system/node"
 
-static void init_cpunode_map(void)
+static int init_cpunode_map(void)
 {
 	FILE *fp;
-	int i;
+	int i, err = -1;
 
 	fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
 	if (!fp) {
 		max_cpu_num = 4096;
-		return;
+		return 0;
+	}
+
+	if (fscanf(fp, "%d", &max_cpu_num) < 1) {
+		pr_err("Failed to read 'kernel_max' from sysfs");
+		goto out_close;
 	}
 
-	if (fscanf(fp, "%d", &max_cpu_num) < 1)
-		die("Failed to read 'kernel_max' from sysfs");
 	max_cpu_num++;
 
 	cpunode_map = calloc(max_cpu_num, sizeof(int));
-	if (!cpunode_map)
-		die("calloc");
+	if (!cpunode_map) {
+		pr_err("%s: calloc failed\n", __func__);
+		goto out_close;
+	}
+
 	for (i = 0; i < max_cpu_num; i++)
 		cpunode_map[i] = -1;
+
+	err = 0;
+out_close:
 	fclose(fp);
+	return err;
 }
 
-static void setup_cpunode_map(void)
+static int setup_cpunode_map(void)
 {
 	struct dirent *dent1, *dent2;
 	DIR *dir1, *dir2;
 	unsigned int cpu, mem;
 	char buf[PATH_MAX];
 
-	init_cpunode_map();
+	if (init_cpunode_map())
+		return -1;
 
 	dir1 = opendir(PATH_SYS_NODE);
 	if (!dir1)
-		return;
+		return -1;
 
 	while ((dent1 = readdir(dir1)) != NULL) {
 		if (dent1->d_type != DT_DIR ||
@@ -112,10 +123,11 @@ static void setup_cpunode_map(void)
 		closedir(dir2);
 	}
 	closedir(dir1);
+	return 0;
 }
 
-static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
+static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
 			      int bytes_req, int bytes_alloc, int cpu)
 {
 	struct rb_node **node = &root_alloc_stat.rb_node;
 	struct rb_node *parent = NULL;
@@ -139,8 +151,10 @@ static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
 		data->bytes_alloc += bytes_alloc;
 	} else {
 		data = malloc(sizeof(*data));
-		if (!data)
-			die("malloc");
+		if (!data) {
+			pr_err("%s: malloc failed\n", __func__);
+			return -1;
+		}
 		data->ptr = ptr;
 		data->pingpong = 0;
 		data->hit = 1;
@@ -152,9 +166,10 @@ static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
 	}
 	data->call_site = call_site;
 	data->alloc_cpu = cpu;
+	return 0;
 }
 
-static void insert_caller_stat(unsigned long call_site,
+static int insert_caller_stat(unsigned long call_site,
 			      int bytes_req, int bytes_alloc)
 {
 	struct rb_node **node = &root_caller_stat.rb_node;
@@ -179,8 +194,10 @@ static void insert_caller_stat(unsigned long call_site,
 		data->bytes_alloc += bytes_alloc;
 	} else {
 		data = malloc(sizeof(*data));
-		if (!data)
-			die("malloc");
+		if (!data) {
+			pr_err("%s: malloc failed\n", __func__);
+			return -1;
+		}
 		data->call_site = call_site;
 		data->pingpong = 0;
 		data->hit = 1;
@@ -190,11 +207,12 @@ static void insert_caller_stat(unsigned long call_site,
 		rb_link_node(&data->node, parent, node);
 		rb_insert_color(&data->node, &root_caller_stat);
 	}
+
+	return 0;
 }
 
-static void perf_evsel__process_alloc_event(struct perf_evsel *evsel,
-					    struct perf_sample *sample,
-					    int node)
+static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
+					   struct perf_sample *sample, int node)
 {
 	struct event_format *event = evsel->tp_format;
 	void *data = sample->raw_data;
@@ -209,8 +227,9 @@ static void perf_evsel__process_alloc_event(struct perf_evsel *evsel,
 	bytes_req = raw_field_value(event, "bytes_req", data);
 	bytes_alloc = raw_field_value(event, "bytes_alloc", data);
 
-	insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
-	insert_caller_stat(call_site, bytes_req, bytes_alloc);
+	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu) ||
+	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
+		return -1;
 
 	total_requested += bytes_req;
 	total_allocated += bytes_alloc;
@@ -222,6 +241,7 @@ static void perf_evsel__process_alloc_event(struct perf_evsel *evsel,
 		nr_cross_allocs++;
 	}
 	nr_allocs++;
+	return 0;
 }
 
 static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
@@ -252,8 +272,8 @@ static struct alloc_stat *search_alloc_stat(unsigned long ptr,
 	return NULL;
 }
 
-static void perf_evsel__process_free_event(struct perf_evsel *evsel,
-					   struct perf_sample *sample)
+static int perf_evsel__process_free_event(struct perf_evsel *evsel,
+					  struct perf_sample *sample)
 {
 	unsigned long ptr = raw_field_value(evsel->tp_format, "ptr",
 					    sample->raw_data);
@@ -261,41 +281,43 @@ static void perf_evsel__process_free_event(struct perf_evsel *evsel,
 
 	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
 	if (!s_alloc)
-		return;
+		return 0;
 
 	if ((short)sample->cpu != s_alloc->alloc_cpu) {
 		s_alloc->pingpong++;
 
 		s_caller = search_alloc_stat(0, s_alloc->call_site,
 					     &root_caller_stat, callsite_cmp);
-		assert(s_caller);
+		if (!s_caller)
+			return -1;
 		s_caller->pingpong++;
 	}
 	s_alloc->alloc_cpu = -1;
+
+	return 0;
 }
 
-static void perf_evsel__process_kmem_event(struct perf_evsel *evsel,
-					   struct perf_sample *sample)
+static int perf_evsel__process_kmem_event(struct perf_evsel *evsel,
+					  struct perf_sample *sample)
 {
 	struct event_format *event = evsel->tp_format;
 
 	if (!strcmp(event->name, "kmalloc") ||
 	    !strcmp(event->name, "kmem_cache_alloc")) {
-		perf_evsel__process_alloc_event(evsel, sample, 0);
-		return;
+		return perf_evsel__process_alloc_event(evsel, sample, 0);
 	}
 
 	if (!strcmp(event->name, "kmalloc_node") ||
 	    !strcmp(event->name, "kmem_cache_alloc_node")) {
-		perf_evsel__process_alloc_event(evsel, sample, 1);
-		return;
+		return perf_evsel__process_alloc_event(evsel, sample, 1);
 	}
 
 	if (!strcmp(event->name, "kfree") ||
 	    !strcmp(event->name, "kmem_cache_free")) {
-		perf_evsel__process_free_event(evsel, sample);
-		return;
+		return perf_evsel__process_free_event(evsel, sample);
 	}
+
+	return 0;
 }
 
 static int process_sample_event(struct perf_tool *tool __used,
@@ -314,8 +336,7 @@ static int process_sample_event(struct perf_tool *tool __used,
 
 	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
 
-	perf_evsel__process_kmem_event(evsel, sample);
-	return 0;
+	return perf_evsel__process_kmem_event(evsel, sample);
 }
 
 static struct perf_tool perf_kmem = {
@@ -613,8 +634,10 @@ static int sort_dimension__add(const char *tok, struct list_head *list)
 	for (i = 0; i < NUM_AVAIL_SORTS; i++) {
 		if (!strcmp(avail_sorts[i]->name, tok)) {
 			sort = malloc(sizeof(*sort));
-			if (!sort)
-				die("malloc");
+			if (!sort) {
+				pr_err("%s: malloc failed\n", __func__);
+				return -1;
+			}
 			memcpy(sort, avail_sorts[i], sizeof(*sort));
 			list_add_tail(&sort->list, list);
 			return 0;
@@ -629,8 +652,10 @@ static int setup_sorting(struct list_head *sort_list, const char *arg)
 	char *tok;
 	char *str = strdup(arg);
 
-	if (!str)
-		die("strdup");
+	if (!str) {
+		pr_err("%s: strdup failed\n", __func__);
+		return -1;
+	}
 
 	while (true) {
 		tok = strsep(&str, ",");
@@ -758,7 +783,8 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __used)
 	if (!strncmp(argv[0], "rec", 3)) {
 		return __cmd_record(argc, argv);
 	} else if (!strcmp(argv[0], "stat")) {
-		setup_cpunode_map();
+		if (setup_cpunode_map())
+			return -1;
 
 		if (list_empty(&caller_sort))
 			setup_sorting(&caller_sort, default_sort_order);