author		Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>	2010-04-21 08:23:54 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2010-04-23 21:23:14 -0400
commit		e4cef1f65061429c3e8b356233e87dc6653a9da5 (patch)
tree		00262af5d37a4648778ed8c3f732a08f1b0776d8 /tools/perf/builtin-lock.c
parent		4093b150e52e6da20e9496df8aa007423952ae42 (diff)
perf lock: Fix state machine to recognize lock sequence
Previous state machine of perf lock was really broken. This patch improves it a little.

This patch prepares a list of state machines that represent lock sequences for each thread. These state machines can be one of these sequences:

 1) acquire -> acquired -> release
 2) acquire -> contended -> acquired -> release
 3) acquire (w/ try) -> release
 4) acquire (w/ read) -> release

The case of 4) is a little special. Double acquire of a read lock is allowed, so the state machine counts the number of read locks and permits double acquire and release.

But things are not so simple. Something in my model is still wrong; I counted the number of lock instances with bad sequences, and the ratio is like this (case of tracing whoami): bad:233, total:2279

version 2:
 * threads are now identified with tid, not pid
 * prepared SEQ_STATE_READ_ACQUIRED for read lock
 * bunch of struct lock_seq_stat is now a linked list
 * debug information enhanced (this has to be removed someday), e.g.:

 | === output for debug===
 |
 | bad:233, total:2279
 | bad rate:0.000000
 | histogram of events caused bad sequence
 |             acquire: 165
 |            acquired: 0
 |           contended: 0
 |             release: 68

Signed-off-by: Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
LKML-Reference: <1271852634-9351-1-git-send-email-mitake@dcl.info.waseda.ac.jp>
[rename SEQ_STATE_UNINITED to SEQ_STATE_UNINITIALIZED]
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
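For reference, here is a minimal standalone sketch of the acquire-side transition the patch implements in report_lock_acquire_event. The SEQ_STATE_* names match the patch; TRY_LOCK/READ_LOCK and check_acquire() are illustrative names only (the patch tests the raw flag bits 1 and 2), so treat this as a sketch, not the patch code:

/*
 * Illustrative sketch of the acquire-side state transition;
 * mirrors the patch's logic but is not the patch code itself.
 */
#include <stdio.h>

enum seq_state {
	SEQ_STATE_UNINITIALIZED,
	SEQ_STATE_RELEASED,
	SEQ_STATE_ACQUIRING,
	SEQ_STATE_ACQUIRED,
	SEQ_STATE_READ_ACQUIRED,
	SEQ_STATE_CONTENDED,
};

#define TRY_LOCK	1	/* hypothetical names for the flag bits */
#define READ_LOCK	2

struct seq {
	enum seq_state state;
	int read_count;		/* depth of nested read acquisition */
};

/* Returns 0 for a legal acquire event, -1 for a broken sequence. */
static int check_acquire(struct seq *s, int flag)
{
	switch (s->state) {
	case SEQ_STATE_UNINITIALIZED:
	case SEQ_STATE_RELEASED:
		if (!flag) {
			s->state = SEQ_STATE_ACQUIRING;
		} else {
			/* try/read locks do not block, so the patch
			 * treats them as acquired immediately */
			s->state = SEQ_STATE_READ_ACQUIRED;
			s->read_count = 1;
		}
		return 0;
	case SEQ_STATE_READ_ACQUIRED:
		if (flag & READ_LOCK) {
			s->read_count++;	/* double read acquire is legal */
			return 0;
		}
		return -1;
	default:
		return -1;	/* acquire while acquiring/acquired/contended */
	}
}

int main(void)
{
	struct seq s = { SEQ_STATE_UNINITIALIZED, 0 };

	printf("%d\n", check_acquire(&s, READ_LOCK));	/* 0: ok  */
	printf("%d\n", check_acquire(&s, READ_LOCK));	/* 0: ok  */
	printf("%d\n", check_acquire(&s, 0));		/* -1: bad */
	return 0;
}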
Diffstat (limited to 'tools/perf/builtin-lock.c')
-rw-r--r--	tools/perf/builtin-lock.c	| 410
1 file changed, 342 insertions(+), 68 deletions(-)
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 6c38e4febf9f..716d8c544a56 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -23,6 +23,8 @@
 #include <linux/list.h>
 #include <linux/hash.h>
 
+static struct perf_session *session;
+
 /* based on kernel/lockdep.c */
 #define LOCKHASH_BITS		12
 #define LOCKHASH_SIZE		(1UL << LOCKHASH_BITS)
@@ -32,9 +34,6 @@ static struct list_head lockhash_table[LOCKHASH_SIZE];
 #define __lockhashfn(key)	hash_long((unsigned long)key, LOCKHASH_BITS)
 #define lockhashentry(key)	(lockhash_table + __lockhashfn((key)))
 
-#define LOCK_STATE_UNLOCKED	0	       /* initial state */
-#define LOCK_STATE_LOCKED	1
-
 struct lock_stat {
 	struct list_head	hash_entry;
 	struct rb_node		rb;		/* used for sorting */
@@ -47,20 +46,151 @@ struct lock_stat {
 	void			*addr;		/* address of lockdep_map, used as ID */
 	char			*name;		/* for strcpy(), we cannot use const */
 
-	int			state;
-	u64			prev_event_time; /* timestamp of previous event */
-
-	unsigned int		nr_acquired;
 	unsigned int		nr_acquire;
+	unsigned int		nr_acquired;
 	unsigned int		nr_contended;
 	unsigned int		nr_release;
 
+	unsigned int		nr_readlock;
+	unsigned int		nr_trylock;
 	/* these times are in nano sec. */
 	u64			wait_time_total;
 	u64			wait_time_min;
 	u64			wait_time_max;
+
+	int			discard; /* flag of blacklist */
+};
+
+/*
+ * States of lock_seq_stat
+ *
+ * UNINITIALIZED is required for detecting the first event of acquire.
+ * By the nature of lock events, there is no guarantee
+ * that the first event for a lock is acquire;
+ * it can be acquired, contended or release.
+ */
+#define SEQ_STATE_UNINITIALIZED	0	/* initial state */
+#define SEQ_STATE_RELEASED	1
+#define SEQ_STATE_ACQUIRING	2
+#define SEQ_STATE_ACQUIRED	3
+#define SEQ_STATE_READ_ACQUIRED	4
+#define SEQ_STATE_CONTENDED	5
+
+/*
+ * MAX_LOCK_DEPTH
+ * Imported from include/linux/sched.h.
+ * Should this be synchronized?
+ */
+#define MAX_LOCK_DEPTH 48
+
+/*
+ * struct lock_seq_stat:
+ * Place to keep the state of one lock sequence
+ * 1) acquire -> acquired -> release
+ * 2) acquire -> contended -> acquired -> release
+ * 3) acquire (with read or try) -> release
+ * 4) Are there other patterns?
+ */
+struct lock_seq_stat {
+	struct list_head	list;
+	int			state;
+	u64			prev_event_time;
+	void			*addr;
+
+	int			read_count;
 };
 
+struct thread_stat {
+	struct rb_node		rb;
+
+	u32			tid;
+	struct list_head	seq_list;
+};
+
+static struct rb_root		thread_stats;
+
+static struct thread_stat *thread_stat_find(u32 tid)
+{
+	struct rb_node *node;
+	struct thread_stat *st;
+
+	node = thread_stats.rb_node;
+	while (node) {
+		st = container_of(node, struct thread_stat, rb);
+		if (st->tid == tid)
+			return st;
+		else if (tid < st->tid)
+			node = node->rb_left;
+		else
+			node = node->rb_right;
+	}
+
+	return NULL;
+}
+
+static void thread_stat_insert(struct thread_stat *new)
+{
+	struct rb_node **rb = &thread_stats.rb_node;
+	struct rb_node *parent = NULL;
+	struct thread_stat *p;
+
+	while (*rb) {
+		p = container_of(*rb, struct thread_stat, rb);
+		parent = *rb;
+
+		if (new->tid < p->tid)
+			rb = &(*rb)->rb_left;
+		else if (new->tid > p->tid)
+			rb = &(*rb)->rb_right;
+		else
+			BUG_ON("inserting invalid thread_stat\n");
+	}
+
+	rb_link_node(&new->rb, parent, rb);
+	rb_insert_color(&new->rb, &thread_stats);
+}
+
+static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
+{
+	struct thread_stat *st;
+
+	st = thread_stat_find(tid);
+	if (st)
+		return st;
+
+	st = zalloc(sizeof(struct thread_stat));
+	if (!st)
+		die("memory allocation failed\n");
+
+	st->tid = tid;
+	INIT_LIST_HEAD(&st->seq_list);
+
+	thread_stat_insert(st);
+
+	return st;
+}
+
+static struct thread_stat *thread_stat_findnew_first(u32 tid);
+static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
+	thread_stat_findnew_first;
+
+static struct thread_stat *thread_stat_findnew_first(u32 tid)
+{
+	struct thread_stat *st;
+
+	st = zalloc(sizeof(struct thread_stat));
+	if (!st)
+		die("memory allocation failed\n");
+	st->tid = tid;
+	INIT_LIST_HEAD(&st->seq_list);
+
+	rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
+	rb_insert_color(&st->rb, &thread_stats);
+
+	thread_stat_findnew = thread_stat_findnew_after_first;
+	return st;
+}
+
 /* build simple key function one is bigger than two */
 #define SINGLE_KEY(member)						\
 	static int lock_stat_key_ ## member(struct lock_stat *one,	\
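The thread_stat_findnew function pointer added above is a small self-replacing initializer: the very first call seeds the empty rbtree without searching it, then swaps the pointer to the regular find-or-insert path so the "is this the first call?" check never runs again. A standalone sketch of the idiom, with purely illustrative names (not patch code):

/* Self-replacing function-pointer idiom, as used by thread_stat_findnew. */
#include <stdio.h>

static int do_work_after_first(int x)
{
	return x * 2;			/* steady-state path */
}

static int do_work_first(int x);
static int (*do_work)(int x) = do_work_first;

static int do_work_first(int x)
{
	/* one-time setup goes here (e.g. seeding an empty tree) */
	do_work = do_work_after_first;	/* never take this path again */
	return x * 2;
}

int main(void)
{
	printf("%d %d\n", do_work(1), do_work(2));	/* prints "2 4" */
	return 0;
}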
@@ -175,8 +305,6 @@ static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
 		goto alloc_failed;
 	strcpy(new->name, name);
 
-	/* LOCK_STATE_UNLOCKED == 0 isn't guaranteed forever */
-	new->state = LOCK_STATE_UNLOCKED;
 	new->wait_time_min = ULLONG_MAX;
 
 	list_add(&new->hash_entry, entry);
@@ -198,6 +326,7 @@ struct raw_event_sample {
 struct trace_acquire_event {
 	void *addr;
 	const char *name;
+	int flag;
 };
 
 struct trace_acquired_event {
@@ -241,120 +370,246 @@ struct trace_lock_handler {
 			      struct thread *thread);
 };
 
+static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
+{
+	struct lock_seq_stat *seq;
+
+	list_for_each_entry(seq, &ts->seq_list, list) {
+		if (seq->addr == addr)
+			return seq;
+	}
+
+	seq = zalloc(sizeof(struct lock_seq_stat));
+	if (!seq)
+		die("Not enough memory\n");
+	seq->state = SEQ_STATE_UNINITIALIZED;
+	seq->addr = addr;
+
+	list_add(&seq->list, &ts->seq_list);
+	return seq;
+}
+
+static int bad_hist[4];
+
 static void
 report_lock_acquire_event(struct trace_acquire_event *acquire_event,
 			  struct event *__event __used,
 			  int cpu __used,
-			  u64 timestamp,
+			  u64 timestamp __used,
 			  struct thread *thread __used)
 {
-	struct lock_stat *st;
+	struct lock_stat *ls;
+	struct thread_stat *ts;
+	struct lock_seq_stat *seq;
+
+	ls = lock_stat_findnew(acquire_event->addr, acquire_event->name);
+	if (ls->discard)
+		return;
 
-	st = lock_stat_findnew(acquire_event->addr, acquire_event->name);
+	ts = thread_stat_findnew(thread->pid);
+	seq = get_seq(ts, acquire_event->addr);
 
-	switch (st->state) {
-	case LOCK_STATE_UNLOCKED:
+	switch (seq->state) {
+	case SEQ_STATE_UNINITIALIZED:
+	case SEQ_STATE_RELEASED:
+		if (!acquire_event->flag) {
+			seq->state = SEQ_STATE_ACQUIRING;
+		} else {
+			if (acquire_event->flag & 1)
+				ls->nr_trylock++;
+			if (acquire_event->flag & 2)
+				ls->nr_readlock++;
+			seq->state = SEQ_STATE_READ_ACQUIRED;
+			seq->read_count = 1;
+			ls->nr_acquired++;
+		}
+		break;
+	case SEQ_STATE_READ_ACQUIRED:
+		if (acquire_event->flag & 2) {
+			seq->read_count++;
+			ls->nr_acquired++;
+			goto end;
+		} else {
+			goto broken;
+		}
 		break;
-	case LOCK_STATE_LOCKED:
+	case SEQ_STATE_ACQUIRED:
+	case SEQ_STATE_ACQUIRING:
+	case SEQ_STATE_CONTENDED:
+broken:
+		/* broken lock sequence, discard it */
+		ls->discard = 1;
+		bad_hist[0]++;
+		list_del(&seq->list);
+		free(seq);
+		goto end;
 		break;
 	default:
-		BUG_ON(1);
+		BUG_ON("Unknown state of lock sequence found!\n");
 		break;
 	}
 
-	st->prev_event_time = timestamp;
+	ls->nr_acquire++;
+	seq->prev_event_time = timestamp;
+end:
+	return;
 }
 
 static void
 report_lock_acquired_event(struct trace_acquired_event *acquired_event,
 			 struct event *__event __used,
 			 int cpu __used,
-			 u64 timestamp,
+			 u64 timestamp __used,
 			 struct thread *thread __used)
 {
-	struct lock_stat *st;
+	struct lock_stat *ls;
+	struct thread_stat *ts;
+	struct lock_seq_stat *seq;
+	u64 contended_term;
 
-	st = lock_stat_findnew(acquired_event->addr, acquired_event->name);
+	ls = lock_stat_findnew(acquired_event->addr, acquired_event->name);
+	if (ls->discard)
+		return;
+
+	ts = thread_stat_findnew(thread->pid);
+	seq = get_seq(ts, acquired_event->addr);
 
-	switch (st->state) {
-	case LOCK_STATE_UNLOCKED:
-		st->state = LOCK_STATE_LOCKED;
-		st->nr_acquired++;
+	switch (seq->state) {
+	case SEQ_STATE_UNINITIALIZED:
+		/* orphan event, do nothing */
+		return;
+	case SEQ_STATE_ACQUIRING:
 		break;
-	case LOCK_STATE_LOCKED:
+	case SEQ_STATE_CONTENDED:
+		contended_term = timestamp - seq->prev_event_time;
+		ls->wait_time_total += contended_term;
+
+		if (contended_term < ls->wait_time_min)
+			ls->wait_time_min = contended_term;
+		else if (ls->wait_time_max < contended_term)
+			ls->wait_time_max = contended_term;
 		break;
+	case SEQ_STATE_RELEASED:
+	case SEQ_STATE_ACQUIRED:
+	case SEQ_STATE_READ_ACQUIRED:
+		/* broken lock sequence, discard it */
+		ls->discard = 1;
+		bad_hist[1]++;
+		list_del(&seq->list);
+		free(seq);
+		goto end;
+		break;
+
 	default:
-		BUG_ON(1);
+		BUG_ON("Unknown state of lock sequence found!\n");
 		break;
 	}
 
-	st->prev_event_time = timestamp;
+	seq->state = SEQ_STATE_ACQUIRED;
+	ls->nr_acquired++;
+	seq->prev_event_time = timestamp;
+end:
+	return;
 }
 
 static void
 report_lock_contended_event(struct trace_contended_event *contended_event,
 			  struct event *__event __used,
 			  int cpu __used,
-			  u64 timestamp,
+			  u64 timestamp __used,
 			  struct thread *thread __used)
 {
-	struct lock_stat *st;
+	struct lock_stat *ls;
+	struct thread_stat *ts;
+	struct lock_seq_stat *seq;
+
+	ls = lock_stat_findnew(contended_event->addr, contended_event->name);
+	if (ls->discard)
+		return;
 
-	st = lock_stat_findnew(contended_event->addr, contended_event->name);
+	ts = thread_stat_findnew(thread->pid);
+	seq = get_seq(ts, contended_event->addr);
 
-	switch (st->state) {
-	case LOCK_STATE_UNLOCKED:
+	switch (seq->state) {
+	case SEQ_STATE_UNINITIALIZED:
+		/* orphan event, do nothing */
+		return;
+	case SEQ_STATE_ACQUIRING:
 		break;
-	case LOCK_STATE_LOCKED:
-		st->nr_contended++;
+	case SEQ_STATE_RELEASED:
+	case SEQ_STATE_ACQUIRED:
+	case SEQ_STATE_READ_ACQUIRED:
+	case SEQ_STATE_CONTENDED:
+		/* broken lock sequence, discard it */
+		ls->discard = 1;
+		bad_hist[2]++;
+		list_del(&seq->list);
+		free(seq);
+		goto end;
 		break;
 	default:
-		BUG_ON(1);
+		BUG_ON("Unknown state of lock sequence found!\n");
 		break;
 	}
 
-	st->prev_event_time = timestamp;
+	seq->state = SEQ_STATE_CONTENDED;
+	ls->nr_contended++;
+	seq->prev_event_time = timestamp;
+end:
+	return;
 }
 
 static void
 report_lock_release_event(struct trace_release_event *release_event,
 			  struct event *__event __used,
 			  int cpu __used,
-			  u64 timestamp,
+			  u64 timestamp __used,
 			  struct thread *thread __used)
 {
-	struct lock_stat *st;
-	u64 hold_time;
+	struct lock_stat *ls;
+	struct thread_stat *ts;
+	struct lock_seq_stat *seq;
 
-	st = lock_stat_findnew(release_event->addr, release_event->name);
+	ls = lock_stat_findnew(release_event->addr, release_event->name);
+	if (ls->discard)
+		return;
 
-	switch (st->state) {
-	case LOCK_STATE_UNLOCKED:
-		break;
-	case LOCK_STATE_LOCKED:
-		st->state = LOCK_STATE_UNLOCKED;
-		hold_time = timestamp - st->prev_event_time;
+	ts = thread_stat_findnew(thread->pid);
+	seq = get_seq(ts, release_event->addr);
 
-		if (timestamp < st->prev_event_time) {
-			/* terribly, this can happen... */
+	switch (seq->state) {
+	case SEQ_STATE_UNINITIALIZED:
+		goto end;
+		break;
+	case SEQ_STATE_ACQUIRED:
+		break;
+	case SEQ_STATE_READ_ACQUIRED:
+		seq->read_count--;
+		BUG_ON(seq->read_count < 0);
+		if (!seq->read_count) {
+			ls->nr_release++;
 			goto end;
 		}
-
-		if (st->wait_time_min > hold_time)
-			st->wait_time_min = hold_time;
-		if (st->wait_time_max < hold_time)
-			st->wait_time_max = hold_time;
-		st->wait_time_total += hold_time;
-
-		st->nr_release++;
 		break;
+	case SEQ_STATE_ACQUIRING:
+	case SEQ_STATE_CONTENDED:
+	case SEQ_STATE_RELEASED:
+		/* broken lock sequence, discard it */
+		ls->discard = 1;
+		bad_hist[3]++;
+		goto free_seq;
+		break;
 	default:
-		BUG_ON(1);
+		BUG_ON("Unknown state of lock sequence found!\n");
 		break;
 	}
 
+	ls->nr_release++;
+free_seq:
+	list_del(&seq->list);
+	free(seq);
 end:
-	st->prev_event_time = timestamp;
+	return;
 }
 
 /* lock oriented handlers */
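The contended branch of report_lock_acquired_event above is where wait time is accumulated: the term between the contended and acquired events goes into the totals. A hedged sketch of just that bookkeeping, with the struct trimmed to the three time fields and account_wait as an illustrative helper (not a patch function):

/* Wait-time accounting as done for a contended acquisition. */
#include <stdio.h>

typedef unsigned long long u64;

struct lock_stat {
	u64 wait_time_total;
	u64 wait_time_min;	/* seeded with ULLONG_MAX in the patch */
	u64 wait_time_max;
};

static void account_wait(struct lock_stat *ls, u64 prev, u64 now)
{
	u64 term = now - prev;	/* contended -> acquired interval (ns) */

	ls->wait_time_total += term;
	/* the else-if mirrors the patch: a term that sets a new
	 * minimum is never checked against the maximum */
	if (term < ls->wait_time_min)
		ls->wait_time_min = term;
	else if (ls->wait_time_max < term)
		ls->wait_time_max = term;
}

int main(void)
{
	struct lock_stat ls = { 0, ~0ULL, 0 };

	account_wait(&ls, 100, 250);	/* 150 ns of contention */
	account_wait(&ls, 400, 700);	/* 300 ns of contention */
	printf("total=%llu min=%llu max=%llu\n",
	       ls.wait_time_total, ls.wait_time_min, ls.wait_time_max);
	return 0;			/* prints total=450 min=150 max=300 */
}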
@@ -381,6 +636,7 @@ process_lock_acquire_event(void *data,
 	tmp = raw_field_value(event, "lockdep_addr", data);
 	memcpy(&acquire_event.addr, &tmp, sizeof(void *));
 	acquire_event.name = (char *)raw_field_ptr(event, "name", data);
+	acquire_event.flag = (int)raw_field_value(event, "flag", data);
 
 	if (trace_handler->acquire_event)
 		trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
@@ -441,8 +697,8 @@ process_lock_release_event(void *data,
 }
 
 static void
-process_raw_event(void *data, int cpu,
-		  u64 timestamp, struct thread *thread)
+process_raw_event(void *data, int cpu __used,
+		  u64 timestamp __used, struct thread *thread __used)
 {
 	struct event *event;
 	int type;
@@ -604,14 +860,15 @@ static void queue_raw_event(void *data, int raw_size, int cpu,
 	}
 }
 
-static int process_sample_event(event_t *event, struct perf_session *session)
+static int process_sample_event(event_t *event, struct perf_session *s)
 {
 	struct thread *thread;
 	struct sample_data data;
 
 	bzero(&data, sizeof(struct sample_data));
-	event__parse_sample(event, session->sample_type, &data);
-	thread = perf_session__findnew(session, data.pid);
+	event__parse_sample(event, s->sample_type, &data);
+	/* CAUTION: using tid as thread.pid */
+	thread = perf_session__findnew(s, data.tid);
 
 	if (thread == NULL) {
 		pr_debug("problem processing %d event, skipping it.\n",
@@ -634,8 +891,8 @@ static void print_result(void)
 {
 	struct lock_stat *st;
 	char cut_name[20];
+	int bad, total;
 
-	printf("%18s ", "ID");
 	printf("%20s ", "Name");
 	printf("%10s ", "acquired");
 	printf("%10s ", "contended");
@@ -646,11 +903,15 @@ static void print_result(void)
 
 	printf("\n\n");
 
+	bad = total = 0;
 	while ((st = pop_from_result())) {
+		total++;
+		if (st->discard) {
+			bad++;
+			continue;
+		}
 		bzero(cut_name, 20);
 
-		printf("%p ", st->addr);
-
 		if (strlen(st->name) < 16) {
 			/* output raw name */
 			printf("%20s ", st->name);
@@ -673,6 +934,21 @@ static void print_result(void)
 				0 : st->wait_time_min);
 		printf("\n");
 	}
+
+	{
+		/* Output for debug, this has to be removed */
+		int i;
+		const char *name[4] =
+		{ "acquire", "acquired", "contended", "release" };
+
+		printf("\n=== output for debug===\n\n");
+		printf("bad:%d, total:%d\n", bad, total);
+		printf("bad rate:%f\n", (double)(bad / total));
+
+		printf("histogram of events caused bad sequence\n");
+		for (i = 0; i < 4; i++)
+			printf(" %10s: %d\n", name[i], bad_hist[i]);
+	}
 }
 
 static void dump_map(void)
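One quirk worth noting in the debug block above: (double)(bad / total) casts only after the integer division has already truncated, which is why the commit message shows "bad rate:0.000000" despite bad:233, total:2279. A minimal demonstration with the usual fix (this is an editorial aside, not part of the patch):

/* Why "bad rate" prints 0.000000: the division happens in
 * integer arithmetic before the cast to double. */
#include <stdio.h>

int main(void)
{
	int bad = 233, total = 2279;

	printf("%f\n", (double)(bad / total));	/* 0.000000: 233/2279 == 0 */
	printf("%f\n", (double)bad / total);	/* 0.102238: cast first    */
	return 0;
}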
@@ -692,8 +968,6 @@ static struct perf_event_ops eops = {
 	.comm			= event__process_comm,
 };
 
-static struct perf_session *session;
-
 static int read_events(void)
 {
 	session = perf_session__new(input_name, O_RDONLY, 0);