path: root/tools/perf/builtin-stat.c
author    Paul Mackerras <paulus@samba.org>  2009-06-19 08:21:42 -0400
committer Ingo Molnar <mingo@elte.hu>        2009-06-19 12:25:47 -0400
commit    9cffa8d53335d891cc0ecb3824a67118b3ee4b2f (patch)
tree      420e0f96198f0e78aedd006280826b8cf0839820 /tools/perf/builtin-stat.c
parent    b49a9e7e72103ea91946453c19703a4dfa1994fe (diff)
perf_counter tools: Define and use our own u64, s64 etc. definitions
On 64-bit powerpc, __u64 is defined to be unsigned long rather than
unsigned long long. This causes compiler warnings every time we print
a __u64 value with %Lx.

Rather than changing __u64, we define our own u64 to be unsigned long
long on all architectures, and similarly s64 as signed long long. For
consistency we also define u32, s32, u16, s16, u8 and s8. These
definitions are put in a new header, types.h, because they are needed
in util/string.h and util/symbol.h.

The main change here is the mechanical change of __[us]{64,32,16,8}
to remove the "__". The other changes are:

* Create types.h
* Include types.h in perf.h, util/string.h and util/symbol.h
* Add types.h to the LIB_H definition in Makefile
* Add (u64) casts in process_overflow_event() and print_sym_table()
  to kill two remaining warnings.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: benh@kernel.crashing.org
LKML-Reference: <19003.33494.495844.956580@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
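For reference, a minimal sketch of what the new tools/perf/util/types.h could look like, based purely on the description above. The header itself is not part of this file's diff, so the exact contents and layout are assumptions:

/*
 * Sketch only: fixed-width aliases that are unsigned/signed long long
 * on every architecture, so values can be printed with %Lx / %Ld
 * without -Wformat warnings on 64-bit powerpc.
 */
#ifndef _PERF_TYPES_H
#define _PERF_TYPES_H

typedef unsigned long long u64;
typedef signed long long   s64;
typedef unsigned int       u32;
typedef signed int         s32;
typedef unsigned short     u16;
typedef signed short       s16;
typedef unsigned char      u8;
typedef signed char        s8;

#endif /* _PERF_TYPES_H */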
Diffstat (limited to 'tools/perf/builtin-stat.c')
-rw-r--r--  tools/perf/builtin-stat.c  62
1 files changed, 31 insertions, 31 deletions
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index e5b3c0ff03a9..6d3eeac1ea25 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -85,29 +85,29 @@ static const unsigned int default_count[] = {
 static int run_count = 1;
 static int run_idx = 0;
 
-static __u64 event_res[MAX_RUN][MAX_COUNTERS][3];
-static __u64 event_scaled[MAX_RUN][MAX_COUNTERS];
+static u64 event_res[MAX_RUN][MAX_COUNTERS][3];
+static u64 event_scaled[MAX_RUN][MAX_COUNTERS];
 
-//static __u64 event_hist[MAX_RUN][MAX_COUNTERS][3];
+//static u64 event_hist[MAX_RUN][MAX_COUNTERS][3];
 
 
-static __u64 runtime_nsecs[MAX_RUN];
-static __u64 walltime_nsecs[MAX_RUN];
-static __u64 runtime_cycles[MAX_RUN];
+static u64 runtime_nsecs[MAX_RUN];
+static u64 walltime_nsecs[MAX_RUN];
+static u64 runtime_cycles[MAX_RUN];
 
-static __u64 event_res_avg[MAX_COUNTERS][3];
-static __u64 event_res_noise[MAX_COUNTERS][3];
+static u64 event_res_avg[MAX_COUNTERS][3];
+static u64 event_res_noise[MAX_COUNTERS][3];
 
-static __u64 event_scaled_avg[MAX_COUNTERS];
+static u64 event_scaled_avg[MAX_COUNTERS];
 
-static __u64 runtime_nsecs_avg;
-static __u64 runtime_nsecs_noise;
+static u64 runtime_nsecs_avg;
+static u64 runtime_nsecs_noise;
 
-static __u64 walltime_nsecs_avg;
-static __u64 walltime_nsecs_noise;
+static u64 walltime_nsecs_avg;
+static u64 walltime_nsecs_noise;
 
-static __u64 runtime_cycles_avg;
-static __u64 runtime_cycles_noise;
+static u64 runtime_cycles_avg;
+static u64 runtime_cycles_noise;
 
 static void create_perf_stat_counter(int counter)
 {
@@ -158,7 +158,7 @@ static inline int nsec_counter(int counter)
  */
 static void read_counter(int counter)
 {
-	__u64 *count, single_count[3];
+	u64 *count, single_count[3];
 	ssize_t res;
 	int cpu, nv;
 	int scaled;
@@ -172,8 +172,8 @@ static void read_counter(int counter)
 		if (fd[cpu][counter] < 0)
 			continue;
 
-		res = read(fd[cpu][counter], single_count, nv * sizeof(__u64));
-		assert(res == nv * sizeof(__u64));
+		res = read(fd[cpu][counter], single_count, nv * sizeof(u64));
+		assert(res == nv * sizeof(u64));
 		close(fd[cpu][counter]);
 		fd[cpu][counter] = -1;
 
@@ -251,14 +251,14 @@ static int run_perf_stat(int argc, const char **argv)
 	return WEXITSTATUS(status);
 }
 
-static void print_noise(__u64 *count, __u64 *noise)
+static void print_noise(u64 *count, u64 *noise)
 {
 	if (run_count > 1)
 		fprintf(stderr, " ( +- %7.3f%% )",
 			(double)noise[0]/(count[0]+1)*100.0);
 }
 
-static void nsec_printout(int counter, __u64 *count, __u64 *noise)
+static void nsec_printout(int counter, u64 *count, u64 *noise)
 {
 	double msecs = (double)count[0] / 1000000;
 
@@ -274,7 +274,7 @@ static void nsec_printout(int counter, __u64 *count, __u64 *noise)
 	print_noise(count, noise);
 }
 
-static void abs_printout(int counter, __u64 *count, __u64 *noise)
+static void abs_printout(int counter, u64 *count, u64 *noise)
 {
 	fprintf(stderr, " %14Ld %-20s", count[0], event_name(counter));
 
@@ -298,7 +298,7 @@ static void abs_printout(int counter, __u64 *count, __u64 *noise)
  */
 static void print_counter(int counter)
 {
-	__u64 *count, *noise;
+	u64 *count, *noise;
 	int scaled;
 
 	count = event_res_avg[counter];
@@ -326,16 +326,16 @@ static void print_counter(int counter)
 /*
  * normalize_noise noise values down to stddev:
  */
-static void normalize_noise(__u64 *val)
+static void normalize_noise(u64 *val)
 {
 	double res;
 
 	res = (double)*val / (run_count * sqrt((double)run_count));
 
-	*val = (__u64)res;
+	*val = (u64)res;
 }
 
-static void update_avg(const char *name, int idx, __u64 *avg, __u64 *val)
+static void update_avg(const char *name, int idx, u64 *avg, u64 *val)
 {
 	*avg += *val;
 
@@ -380,19 +380,19 @@ static void calc_avg(void)
 
 	for (i = 0; i < run_count; i++) {
 		runtime_nsecs_noise +=
-			abs((__s64)(runtime_nsecs[i] - runtime_nsecs_avg));
+			abs((s64)(runtime_nsecs[i] - runtime_nsecs_avg));
 		walltime_nsecs_noise +=
-			abs((__s64)(walltime_nsecs[i] - walltime_nsecs_avg));
+			abs((s64)(walltime_nsecs[i] - walltime_nsecs_avg));
 		runtime_cycles_noise +=
-			abs((__s64)(runtime_cycles[i] - runtime_cycles_avg));
+			abs((s64)(runtime_cycles[i] - runtime_cycles_avg));
 
 		for (j = 0; j < nr_counters; j++) {
 			event_res_noise[j][0] +=
-				abs((__s64)(event_res[i][j][0] - event_res_avg[j][0]));
+				abs((s64)(event_res[i][j][0] - event_res_avg[j][0]));
 			event_res_noise[j][1] +=
-				abs((__s64)(event_res[i][j][1] - event_res_avg[j][1]));
+				abs((s64)(event_res[i][j][1] - event_res_avg[j][1]));
 			event_res_noise[j][2] +=
-				abs((__s64)(event_res[i][j][2] - event_res_avg[j][2]));
+				abs((s64)(event_res[i][j][2] - event_res_avg[j][2]));
 		}
 	}
 
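As an aside, a self-contained illustration (not taken from this commit) of the format warning the commit message describes and of the two fixes it applies: the always-long-long u64 typedef and an explicit (u64) cast at leftover __u64 call sites. The typedef name powerpc__u64 below is purely hypothetical, standing in for what __u64 expands to on 64-bit powerpc:

/* Illustration only -- compile with -Wformat to see the difference. */
#include <stdio.h>

typedef unsigned long      powerpc__u64;   /* __u64 on 64-bit powerpc: unsigned long      */
typedef unsigned long long u64;            /* perf's new type: long long on every arch    */

int main(void)
{
	powerpc__u64 old_style = 0x1234;
	u64 new_style = 0x1234;

	/* printf("%Lx\n", old_style);     -- warns: %Lx expects (unsigned) long long */
	printf("%Lx\n", (u64)old_style);   /* the "(u64) cast" fix for leftover sites */
	printf("%Lx\n", new_style);        /* clean once the variable itself is u64   */
	return 0;
}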