-rw-r--r--  tools/perf/Documentation/perf-kvm.txt  |   9
-rw-r--r--  tools/perf/builtin-kmem.c              |   6
-rw-r--r--  tools/perf/builtin-lock.c              | 595
-rw-r--r--  tools/perf/builtin-sched.c             |   8
-rw-r--r--  tools/perf/builtin-timechart.c         | 112
-rw-r--r--  tools/perf/builtin-trace.c             |  13
-rw-r--r--  tools/perf/util/parse-events.c         |  13
-rw-r--r--  tools/perf/util/session.c              | 179
-rw-r--r--  tools/perf/util/session.h              |  10
9 files changed, 583 insertions(+), 362 deletions(-)
diff --git a/tools/perf/Documentation/perf-kvm.txt b/tools/perf/Documentation/perf-kvm.txt
index 93400a0f17f..d004e19fe6d 100644
--- a/tools/perf/Documentation/perf-kvm.txt
+++ b/tools/perf/Documentation/perf-kvm.txt
@@ -1,5 +1,5 @@
1perf-kvm(1) 1perf-kvm(1)
2============== 2===========
3 3
4NAME 4NAME
5---- 5----
@@ -41,9 +41,9 @@ There are a couple of variants of perf kvm:
41OPTIONS 41OPTIONS
42------- 42-------
43--host=:: 43--host=::
44 Collect host side perforamnce profile. 44 Collect host side performance profile.
45--guest=:: 45--guest=::
46 Collect guest side perforamnce profile. 46 Collect guest side performance profile.
47--guestmount=<path>:: 47--guestmount=<path>::
48 Guest os root file system mount directory. Users mounts guest os 48 Guest os root file system mount directory. Users mounts guest os
49 root directories under <path> by a specific filesystem access method, 49 root directories under <path> by a specific filesystem access method,
@@ -64,4 +64,5 @@ OPTIONS
64 64
65SEE ALSO 65SEE ALSO
66-------- 66--------
67linkperf:perf-top[1] perf-record[1] perf-report[1] perf-diff[1] perf-buildid-list[1] 67linkperf:perf-top[1], linkperf:perf-record[1], linkperf:perf-report[1],
68linkperf:perf-diff[1], linkperf:perf-buildid-list[1]
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index db474bbf332..ab906cbd5c7 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -335,8 +335,9 @@ static int process_sample_event(event_t *event, struct perf_session *session)
335} 335}
336 336
337static struct perf_event_ops event_ops = { 337static struct perf_event_ops event_ops = {
338 .sample = process_sample_event, 338 .sample = process_sample_event,
339 .comm = event__process_comm, 339 .comm = event__process_comm,
340 .ordered_samples = true,
340}; 341};
341 342
342static double fragmentation(unsigned long n_req, unsigned long n_alloc) 343static double fragmentation(unsigned long n_req, unsigned long n_alloc)
@@ -730,7 +731,6 @@ static const char *record_args[] = {
730 "record", 731 "record",
731 "-a", 732 "-a",
732 "-R", 733 "-R",
733 "-M",
734 "-f", 734 "-f",
735 "-c", "1", 735 "-c", "1",
736 "-e", "kmem:kmalloc", 736 "-e", "kmem:kmalloc",
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 6c38e4febf9..ce276750b14 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -23,6 +23,8 @@
23#include <linux/list.h> 23#include <linux/list.h>
24#include <linux/hash.h> 24#include <linux/hash.h>
25 25
26static struct perf_session *session;
27
26/* based on kernel/lockdep.c */ 28/* based on kernel/lockdep.c */
27#define LOCKHASH_BITS 12 29#define LOCKHASH_BITS 12
28#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS) 30#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
@@ -32,9 +34,6 @@ static struct list_head lockhash_table[LOCKHASH_SIZE];
32#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS) 34#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
33#define lockhashentry(key) (lockhash_table + __lockhashfn((key))) 35#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
34 36
35#define LOCK_STATE_UNLOCKED 0 /* initial state */
36#define LOCK_STATE_LOCKED 1
37
38struct lock_stat { 37struct lock_stat {
39 struct list_head hash_entry; 38 struct list_head hash_entry;
40 struct rb_node rb; /* used for sorting */ 39 struct rb_node rb; /* used for sorting */
@@ -47,20 +46,151 @@ struct lock_stat {
47 void *addr; /* address of lockdep_map, used as ID */ 46 void *addr; /* address of lockdep_map, used as ID */
48 char *name; /* for strcpy(), we cannot use const */ 47 char *name; /* for strcpy(), we cannot use const */
49 48
50 int state;
51 u64 prev_event_time; /* timestamp of previous event */
52
53 unsigned int nr_acquired;
54 unsigned int nr_acquire; 49 unsigned int nr_acquire;
50 unsigned int nr_acquired;
55 unsigned int nr_contended; 51 unsigned int nr_contended;
56 unsigned int nr_release; 52 unsigned int nr_release;
57 53
54 unsigned int nr_readlock;
55 unsigned int nr_trylock;
58 /* these times are in nano sec. */ 56 /* these times are in nano sec. */
59 u64 wait_time_total; 57 u64 wait_time_total;
60 u64 wait_time_min; 58 u64 wait_time_min;
61 u64 wait_time_max; 59 u64 wait_time_max;
60
61 int discard; /* flag of blacklist */
62};
63
64/*
65 * States of lock_seq_stat
66 *
67 * UNINITIALIZED is required for detecting first event of acquire.
68 * As the nature of lock events, there is no guarantee
69 * that the first event for the locks are acquire,
70 * it can be acquired, contended or release.
71 */
72#define SEQ_STATE_UNINITIALIZED 0 /* initial state */
73#define SEQ_STATE_RELEASED 1
74#define SEQ_STATE_ACQUIRING 2
75#define SEQ_STATE_ACQUIRED 3
76#define SEQ_STATE_READ_ACQUIRED 4
77#define SEQ_STATE_CONTENDED 5
78
79/*
80 * MAX_LOCK_DEPTH
81 * Imported from include/linux/sched.h.
82 * Should this be synchronized?
83 */
84#define MAX_LOCK_DEPTH 48
85
86/*
87 * struct lock_seq_stat:
88 * Place to put on state of one lock sequence
89 * 1) acquire -> acquired -> release
90 * 2) acquire -> contended -> acquired -> release
91 * 3) acquire (with read or try) -> release
92 * 4) Are there other patterns?
93 */
94struct lock_seq_stat {
95 struct list_head list;
96 int state;
97 u64 prev_event_time;
98 void *addr;
99
100 int read_count;
62}; 101};
63 102
103struct thread_stat {
104 struct rb_node rb;
105
106 u32 tid;
107 struct list_head seq_list;
108};
109
110static struct rb_root thread_stats;
111
112static struct thread_stat *thread_stat_find(u32 tid)
113{
114 struct rb_node *node;
115 struct thread_stat *st;
116
117 node = thread_stats.rb_node;
118 while (node) {
119 st = container_of(node, struct thread_stat, rb);
120 if (st->tid == tid)
121 return st;
122 else if (tid < st->tid)
123 node = node->rb_left;
124 else
125 node = node->rb_right;
126 }
127
128 return NULL;
129}
130
131static void thread_stat_insert(struct thread_stat *new)
132{
133 struct rb_node **rb = &thread_stats.rb_node;
134 struct rb_node *parent = NULL;
135 struct thread_stat *p;
136
137 while (*rb) {
138 p = container_of(*rb, struct thread_stat, rb);
139 parent = *rb;
140
141 if (new->tid < p->tid)
142 rb = &(*rb)->rb_left;
143 else if (new->tid > p->tid)
144 rb = &(*rb)->rb_right;
145 else
146 BUG_ON("inserting invalid thread_stat\n");
147 }
148
149 rb_link_node(&new->rb, parent, rb);
150 rb_insert_color(&new->rb, &thread_stats);
151}
152
153static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
154{
155 struct thread_stat *st;
156
157 st = thread_stat_find(tid);
158 if (st)
159 return st;
160
161 st = zalloc(sizeof(struct thread_stat));
162 if (!st)
163 die("memory allocation failed\n");
164
165 st->tid = tid;
166 INIT_LIST_HEAD(&st->seq_list);
167
168 thread_stat_insert(st);
169
170 return st;
171}
172
173static struct thread_stat *thread_stat_findnew_first(u32 tid);
174static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
175 thread_stat_findnew_first;
176
177static struct thread_stat *thread_stat_findnew_first(u32 tid)
178{
179 struct thread_stat *st;
180
181 st = zalloc(sizeof(struct thread_stat));
182 if (!st)
183 die("memory allocation failed\n");
184 st->tid = tid;
185 INIT_LIST_HEAD(&st->seq_list);
186
187 rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
188 rb_insert_color(&st->rb, &thread_stats);
189
190 thread_stat_findnew = thread_stat_findnew_after_first;
191 return st;
192}
193
64/* build simple key function one is bigger than two */ 194/* build simple key function one is bigger than two */
65#define SINGLE_KEY(member) \ 195#define SINGLE_KEY(member) \
66 static int lock_stat_key_ ## member(struct lock_stat *one, \ 196 static int lock_stat_key_ ## member(struct lock_stat *one, \
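
A note on the new per-thread tracking introduced above: each (thread, lock address) pair now carries a small state machine (struct lock_seq_stat) that the report_lock_*() handlers below drive. A minimal sketch of the transitions, assuming the SEQ_STATE_* constants from this patch; the helper and the EV_*/SEQ_BROKEN names are hypothetical and only restate the rules encoded in the handlers' switch statements:

/* Illustration only, not part of the patch. Returns the next state for one
 * lock sequence, SEQ_BROKEN when the handlers would discard the sequence
 * (ls->discard = 1, bad_hist[]++), or SEQ_IGNORED for an orphan event that
 * hits a sequence still in SEQ_STATE_UNINITIALIZED.
 */
enum lock_ev { EV_ACQUIRE, EV_ACQUIRED, EV_CONTENDED, EV_RELEASE };
#define SEQ_BROKEN	(-1)
#define SEQ_IGNORED	(-2)

static int seq_next_state(int state, enum lock_ev ev, int flag)
{
	switch (ev) {
	case EV_ACQUIRE:
		if (state == SEQ_STATE_UNINITIALIZED ||
		    state == SEQ_STATE_RELEASED)
			/* flag != 0 means trylock and/or readlock */
			return flag ? SEQ_STATE_READ_ACQUIRED
				    : SEQ_STATE_ACQUIRING;
		if (state == SEQ_STATE_READ_ACQUIRED && (flag & 2))
			return SEQ_STATE_READ_ACQUIRED;	/* nested read lock */
		return SEQ_BROKEN;
	case EV_ACQUIRED:
		if (state == SEQ_STATE_UNINITIALIZED)
			return SEQ_IGNORED;
		if (state == SEQ_STATE_ACQUIRING ||
		    state == SEQ_STATE_CONTENDED)
			return SEQ_STATE_ACQUIRED;
		return SEQ_BROKEN;
	case EV_CONTENDED:
		if (state == SEQ_STATE_UNINITIALIZED)
			return SEQ_IGNORED;
		if (state == SEQ_STATE_ACQUIRING)
			return SEQ_STATE_CONTENDED;
		return SEQ_BROKEN;
	case EV_RELEASE:
		if (state == SEQ_STATE_UNINITIALIZED)
			return SEQ_IGNORED;
		if (state == SEQ_STATE_ACQUIRED ||
		    state == SEQ_STATE_READ_ACQUIRED)
			/* read_count bookkeeping elided; the real handler
			 * frees the sequence once it is fully released */
			return SEQ_STATE_RELEASED;
		return SEQ_BROKEN;
	}
	return SEQ_BROKEN;
}
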
@@ -175,8 +305,6 @@ static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
175 goto alloc_failed; 305 goto alloc_failed;
176 strcpy(new->name, name); 306 strcpy(new->name, name);
177 307
178 /* LOCK_STATE_UNLOCKED == 0 isn't guaranteed forever */
179 new->state = LOCK_STATE_UNLOCKED;
180 new->wait_time_min = ULLONG_MAX; 308 new->wait_time_min = ULLONG_MAX;
181 309
182 list_add(&new->hash_entry, entry); 310 list_add(&new->hash_entry, entry);
@@ -188,8 +316,6 @@ alloc_failed:
188 316
189static char const *input_name = "perf.data"; 317static char const *input_name = "perf.data";
190 318
191static int profile_cpu = -1;
192
193struct raw_event_sample { 319struct raw_event_sample {
194 u32 size; 320 u32 size;
195 char data[0]; 321 char data[0];
@@ -198,6 +324,7 @@ struct raw_event_sample {
198struct trace_acquire_event { 324struct trace_acquire_event {
199 void *addr; 325 void *addr;
200 const char *name; 326 const char *name;
327 int flag;
201}; 328};
202 329
203struct trace_acquired_event { 330struct trace_acquired_event {
@@ -241,120 +368,246 @@ struct trace_lock_handler {
241 struct thread *thread); 368 struct thread *thread);
242}; 369};
243 370
371static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
372{
373 struct lock_seq_stat *seq;
374
375 list_for_each_entry(seq, &ts->seq_list, list) {
376 if (seq->addr == addr)
377 return seq;
378 }
379
380 seq = zalloc(sizeof(struct lock_seq_stat));
381 if (!seq)
382 die("Not enough memory\n");
383 seq->state = SEQ_STATE_UNINITIALIZED;
384 seq->addr = addr;
385
386 list_add(&seq->list, &ts->seq_list);
387 return seq;
388}
389
390static int bad_hist[4];
391
244static void 392static void
245report_lock_acquire_event(struct trace_acquire_event *acquire_event, 393report_lock_acquire_event(struct trace_acquire_event *acquire_event,
246 struct event *__event __used, 394 struct event *__event __used,
247 int cpu __used, 395 int cpu __used,
248 u64 timestamp, 396 u64 timestamp __used,
249 struct thread *thread __used) 397 struct thread *thread __used)
250{ 398{
251 struct lock_stat *st; 399 struct lock_stat *ls;
400 struct thread_stat *ts;
401 struct lock_seq_stat *seq;
402
403 ls = lock_stat_findnew(acquire_event->addr, acquire_event->name);
404 if (ls->discard)
405 return;
252 406
253 st = lock_stat_findnew(acquire_event->addr, acquire_event->name); 407 ts = thread_stat_findnew(thread->pid);
408 seq = get_seq(ts, acquire_event->addr);
254 409
255 switch (st->state) { 410 switch (seq->state) {
256 case LOCK_STATE_UNLOCKED: 411 case SEQ_STATE_UNINITIALIZED:
412 case SEQ_STATE_RELEASED:
413 if (!acquire_event->flag) {
414 seq->state = SEQ_STATE_ACQUIRING;
415 } else {
416 if (acquire_event->flag & 1)
417 ls->nr_trylock++;
418 if (acquire_event->flag & 2)
419 ls->nr_readlock++;
420 seq->state = SEQ_STATE_READ_ACQUIRED;
421 seq->read_count = 1;
422 ls->nr_acquired++;
423 }
424 break;
425 case SEQ_STATE_READ_ACQUIRED:
426 if (acquire_event->flag & 2) {
427 seq->read_count++;
428 ls->nr_acquired++;
429 goto end;
430 } else {
431 goto broken;
432 }
257 break; 433 break;
258 case LOCK_STATE_LOCKED: 434 case SEQ_STATE_ACQUIRED:
435 case SEQ_STATE_ACQUIRING:
436 case SEQ_STATE_CONTENDED:
437broken:
438 /* broken lock sequence, discard it */
439 ls->discard = 1;
440 bad_hist[0]++;
441 list_del(&seq->list);
442 free(seq);
443 goto end;
259 break; 444 break;
260 default: 445 default:
261 BUG_ON(1); 446 BUG_ON("Unknown state of lock sequence found!\n");
262 break; 447 break;
263 } 448 }
264 449
265 st->prev_event_time = timestamp; 450 ls->nr_acquire++;
451 seq->prev_event_time = timestamp;
452end:
453 return;
266} 454}
267 455
268static void 456static void
269report_lock_acquired_event(struct trace_acquired_event *acquired_event, 457report_lock_acquired_event(struct trace_acquired_event *acquired_event,
270 struct event *__event __used, 458 struct event *__event __used,
271 int cpu __used, 459 int cpu __used,
272 u64 timestamp, 460 u64 timestamp __used,
273 struct thread *thread __used) 461 struct thread *thread __used)
274{ 462{
275 struct lock_stat *st; 463 struct lock_stat *ls;
464 struct thread_stat *ts;
465 struct lock_seq_stat *seq;
466 u64 contended_term;
276 467
277 st = lock_stat_findnew(acquired_event->addr, acquired_event->name); 468 ls = lock_stat_findnew(acquired_event->addr, acquired_event->name);
469 if (ls->discard)
470 return;
278 471
279 switch (st->state) { 472 ts = thread_stat_findnew(thread->pid);
280 case LOCK_STATE_UNLOCKED: 473 seq = get_seq(ts, acquired_event->addr);
281 st->state = LOCK_STATE_LOCKED; 474
282 st->nr_acquired++; 475 switch (seq->state) {
476 case SEQ_STATE_UNINITIALIZED:
477 /* orphan event, do nothing */
478 return;
479 case SEQ_STATE_ACQUIRING:
480 break;
481 case SEQ_STATE_CONTENDED:
482 contended_term = timestamp - seq->prev_event_time;
483 ls->wait_time_total += contended_term;
484
485 if (contended_term < ls->wait_time_min)
486 ls->wait_time_min = contended_term;
487 else if (ls->wait_time_max < contended_term)
488 ls->wait_time_max = contended_term;
283 break; 489 break;
284 case LOCK_STATE_LOCKED: 490 case SEQ_STATE_RELEASED:
491 case SEQ_STATE_ACQUIRED:
492 case SEQ_STATE_READ_ACQUIRED:
493 /* broken lock sequence, discard it */
494 ls->discard = 1;
495 bad_hist[1]++;
496 list_del(&seq->list);
497 free(seq);
498 goto end;
285 break; 499 break;
500
286 default: 501 default:
287 BUG_ON(1); 502 BUG_ON("Unknown state of lock sequence found!\n");
288 break; 503 break;
289 } 504 }
290 505
291 st->prev_event_time = timestamp; 506 seq->state = SEQ_STATE_ACQUIRED;
507 ls->nr_acquired++;
508 seq->prev_event_time = timestamp;
509end:
510 return;
292} 511}
293 512
294static void 513static void
295report_lock_contended_event(struct trace_contended_event *contended_event, 514report_lock_contended_event(struct trace_contended_event *contended_event,
296 struct event *__event __used, 515 struct event *__event __used,
297 int cpu __used, 516 int cpu __used,
298 u64 timestamp, 517 u64 timestamp __used,
299 struct thread *thread __used) 518 struct thread *thread __used)
300{ 519{
301 struct lock_stat *st; 520 struct lock_stat *ls;
521 struct thread_stat *ts;
522 struct lock_seq_stat *seq;
302 523
303 st = lock_stat_findnew(contended_event->addr, contended_event->name); 524 ls = lock_stat_findnew(contended_event->addr, contended_event->name);
525 if (ls->discard)
526 return;
527
528 ts = thread_stat_findnew(thread->pid);
529 seq = get_seq(ts, contended_event->addr);
304 530
305 switch (st->state) { 531 switch (seq->state) {
306 case LOCK_STATE_UNLOCKED: 532 case SEQ_STATE_UNINITIALIZED:
533 /* orphan event, do nothing */
534 return;
535 case SEQ_STATE_ACQUIRING:
307 break; 536 break;
308 case LOCK_STATE_LOCKED: 537 case SEQ_STATE_RELEASED:
309 st->nr_contended++; 538 case SEQ_STATE_ACQUIRED:
539 case SEQ_STATE_READ_ACQUIRED:
540 case SEQ_STATE_CONTENDED:
541 /* broken lock sequence, discard it */
542 ls->discard = 1;
543 bad_hist[2]++;
544 list_del(&seq->list);
545 free(seq);
546 goto end;
310 break; 547 break;
311 default: 548 default:
312 BUG_ON(1); 549 BUG_ON("Unknown state of lock sequence found!\n");
313 break; 550 break;
314 } 551 }
315 552
316 st->prev_event_time = timestamp; 553 seq->state = SEQ_STATE_CONTENDED;
554 ls->nr_contended++;
555 seq->prev_event_time = timestamp;
556end:
557 return;
317} 558}
318 559
319static void 560static void
320report_lock_release_event(struct trace_release_event *release_event, 561report_lock_release_event(struct trace_release_event *release_event,
321 struct event *__event __used, 562 struct event *__event __used,
322 int cpu __used, 563 int cpu __used,
323 u64 timestamp, 564 u64 timestamp __used,
324 struct thread *thread __used) 565 struct thread *thread __used)
325{ 566{
326 struct lock_stat *st; 567 struct lock_stat *ls;
327 u64 hold_time; 568 struct thread_stat *ts;
569 struct lock_seq_stat *seq;
328 570
329 st = lock_stat_findnew(release_event->addr, release_event->name); 571 ls = lock_stat_findnew(release_event->addr, release_event->name);
572 if (ls->discard)
573 return;
330 574
331 switch (st->state) { 575 ts = thread_stat_findnew(thread->pid);
332 case LOCK_STATE_UNLOCKED: 576 seq = get_seq(ts, release_event->addr);
333 break;
334 case LOCK_STATE_LOCKED:
335 st->state = LOCK_STATE_UNLOCKED;
336 hold_time = timestamp - st->prev_event_time;
337 577
338 if (timestamp < st->prev_event_time) { 578 switch (seq->state) {
339 /* terribly, this can happen... */ 579 case SEQ_STATE_UNINITIALIZED:
580 goto end;
581 break;
582 case SEQ_STATE_ACQUIRED:
583 break;
584 case SEQ_STATE_READ_ACQUIRED:
585 seq->read_count--;
586 BUG_ON(seq->read_count < 0);
587 if (!seq->read_count) {
588 ls->nr_release++;
340 goto end; 589 goto end;
341 } 590 }
342 591 break;
343 if (st->wait_time_min > hold_time) 592 case SEQ_STATE_ACQUIRING:
344 st->wait_time_min = hold_time; 593 case SEQ_STATE_CONTENDED:
345 if (st->wait_time_max < hold_time) 594 case SEQ_STATE_RELEASED:
346 st->wait_time_max = hold_time; 595 /* broken lock sequence, discard it */
347 st->wait_time_total += hold_time; 596 ls->discard = 1;
348 597 bad_hist[3]++;
349 st->nr_release++; 598 goto free_seq;
350 break; 599 break;
351 default: 600 default:
352 BUG_ON(1); 601 BUG_ON("Unknown state of lock sequence found!\n");
353 break; 602 break;
354 } 603 }
355 604
605 ls->nr_release++;
606free_seq:
607 list_del(&seq->list);
608 free(seq);
356end: 609end:
357 st->prev_event_time = timestamp; 610 return;
358} 611}
359 612
360/* lock oriented handlers */ 613/* lock oriented handlers */
@@ -381,6 +634,7 @@ process_lock_acquire_event(void *data,
381 tmp = raw_field_value(event, "lockdep_addr", data); 634 tmp = raw_field_value(event, "lockdep_addr", data);
382 memcpy(&acquire_event.addr, &tmp, sizeof(void *)); 635 memcpy(&acquire_event.addr, &tmp, sizeof(void *));
383 acquire_event.name = (char *)raw_field_ptr(event, "name", data); 636 acquire_event.name = (char *)raw_field_ptr(event, "name", data);
637 acquire_event.flag = (int)raw_field_value(event, "flag", data);
384 638
385 if (trace_handler->acquire_event) 639 if (trace_handler->acquire_event)
386 trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread); 640 trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
@@ -441,8 +695,7 @@ process_lock_release_event(void *data,
441} 695}
442 696
443static void 697static void
444process_raw_event(void *data, int cpu, 698process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread)
445 u64 timestamp, struct thread *thread)
446{ 699{
447 struct event *event; 700 struct event *event;
448 int type; 701 int type;
@@ -460,182 +713,13 @@ process_raw_event(void *data, int cpu,
460 process_lock_release_event(data, event, cpu, timestamp, thread); 713 process_lock_release_event(data, event, cpu, timestamp, thread);
461} 714}
462 715
463struct raw_event_queue {
464 u64 timestamp;
465 int cpu;
466 void *data;
467 struct thread *thread;
468 struct list_head list;
469};
470
471static LIST_HEAD(raw_event_head);
472
473#define FLUSH_PERIOD (5 * NSEC_PER_SEC)
474
475static u64 flush_limit = ULLONG_MAX;
476static u64 last_flush = 0;
477struct raw_event_queue *last_inserted;
478
479static void flush_raw_event_queue(u64 limit)
480{
481 struct raw_event_queue *tmp, *iter;
482
483 list_for_each_entry_safe(iter, tmp, &raw_event_head, list) {
484 if (iter->timestamp > limit)
485 return;
486
487 if (iter == last_inserted)
488 last_inserted = NULL;
489
490 process_raw_event(iter->data, iter->cpu, iter->timestamp,
491 iter->thread);
492
493 last_flush = iter->timestamp;
494 list_del(&iter->list);
495 free(iter->data);
496 free(iter);
497 }
498}
499
500static void __queue_raw_event_end(struct raw_event_queue *new)
501{
502 struct raw_event_queue *iter;
503
504 list_for_each_entry_reverse(iter, &raw_event_head, list) {
505 if (iter->timestamp < new->timestamp) {
506 list_add(&new->list, &iter->list);
507 return;
508 }
509 }
510
511 list_add(&new->list, &raw_event_head);
512}
513
514static void __queue_raw_event_before(struct raw_event_queue *new,
515 struct raw_event_queue *iter)
516{
517 list_for_each_entry_continue_reverse(iter, &raw_event_head, list) {
518 if (iter->timestamp < new->timestamp) {
519 list_add(&new->list, &iter->list);
520 return;
521 }
522 }
523
524 list_add(&new->list, &raw_event_head);
525}
526
527static void __queue_raw_event_after(struct raw_event_queue *new,
528 struct raw_event_queue *iter)
529{
530 list_for_each_entry_continue(iter, &raw_event_head, list) {
531 if (iter->timestamp > new->timestamp) {
532 list_add_tail(&new->list, &iter->list);
533 return;
534 }
535 }
536 list_add_tail(&new->list, &raw_event_head);
537}
538
539/* The queue is ordered by time */
540static void __queue_raw_event(struct raw_event_queue *new)
541{
542 if (!last_inserted) {
543 __queue_raw_event_end(new);
544 return;
545 }
546
547 /*
548 * Most of the time the current event has a timestamp
549 * very close to the last event inserted, unless we just switched
550 * to another event buffer. Having a sorting based on a list and
551 * on the last inserted event that is close to the current one is
552 * probably more efficient than an rbtree based sorting.
553 */
554 if (last_inserted->timestamp >= new->timestamp)
555 __queue_raw_event_before(new, last_inserted);
556 else
557 __queue_raw_event_after(new, last_inserted);
558}
559
560static void queue_raw_event(void *data, int raw_size, int cpu,
561 u64 timestamp, struct thread *thread)
562{
563 struct raw_event_queue *new;
564
565 if (flush_limit == ULLONG_MAX)
566 flush_limit = timestamp + FLUSH_PERIOD;
567
568 if (timestamp < last_flush) {
569 printf("Warning: Timestamp below last timeslice flush\n");
570 return;
571 }
572
573 new = malloc(sizeof(*new));
574 if (!new)
575 die("Not enough memory\n");
576
577 new->timestamp = timestamp;
578 new->cpu = cpu;
579 new->thread = thread;
580
581 new->data = malloc(raw_size);
582 if (!new->data)
583 die("Not enough memory\n");
584
585 memcpy(new->data, data, raw_size);
586
587 __queue_raw_event(new);
588 last_inserted = new;
589
590 /*
591 * We want to have a slice of events covering 2 * FLUSH_PERIOD
592 * If FLUSH_PERIOD is big enough, it ensures every events that occured
593 * in the first half of the timeslice have all been buffered and there
594 * are none remaining (we need that because of the weakly ordered
595 * event recording we have). Then once we reach the 2 * FLUSH_PERIOD
596 * timeslice, we flush the first half to be gentle with the memory
597 * (the second half can still get new events in the middle, so wait
598 * another period to flush it)
599 */
600 if (new->timestamp > flush_limit &&
601 new->timestamp - flush_limit > FLUSH_PERIOD) {
602 flush_limit += FLUSH_PERIOD;
603 flush_raw_event_queue(flush_limit);
604 }
605}
606
607static int process_sample_event(event_t *event, struct perf_session *session)
608{
609 struct thread *thread;
610 struct sample_data data;
611
612 bzero(&data, sizeof(struct sample_data));
613 event__parse_sample(event, session->sample_type, &data);
614 thread = perf_session__findnew(session, data.pid);
615
616 if (thread == NULL) {
617 pr_debug("problem processing %d event, skipping it.\n",
618 event->header.type);
619 return -1;
620 }
621
622 dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
623
624 if (profile_cpu != -1 && profile_cpu != (int) data.cpu)
625 return 0;
626
627 queue_raw_event(data.raw_data, data.raw_size, data.cpu, data.time, thread);
628
629 return 0;
630}
631
632/* TODO: various way to print, coloring, nano or milli sec */ 716/* TODO: various way to print, coloring, nano or milli sec */
633static void print_result(void) 717static void print_result(void)
634{ 718{
635 struct lock_stat *st; 719 struct lock_stat *st;
636 char cut_name[20]; 720 char cut_name[20];
721 int bad, total;
637 722
638 printf("%18s ", "ID");
639 printf("%20s ", "Name"); 723 printf("%20s ", "Name");
640 printf("%10s ", "acquired"); 724 printf("%10s ", "acquired");
641 printf("%10s ", "contended"); 725 printf("%10s ", "contended");
@@ -646,11 +730,15 @@ static void print_result(void)
646 730
647 printf("\n\n"); 731 printf("\n\n");
648 732
733 bad = total = 0;
649 while ((st = pop_from_result())) { 734 while ((st = pop_from_result())) {
735 total++;
736 if (st->discard) {
737 bad++;
738 continue;
739 }
650 bzero(cut_name, 20); 740 bzero(cut_name, 20);
651 741
652 printf("%p ", st->addr);
653
654 if (strlen(st->name) < 16) { 742 if (strlen(st->name) < 16) {
655 /* output raw name */ 743 /* output raw name */
656 printf("%20s ", st->name); 744 printf("%20s ", st->name);
@@ -673,6 +761,21 @@ static void print_result(void)
673 0 : st->wait_time_min); 761 0 : st->wait_time_min);
674 printf("\n"); 762 printf("\n");
675 } 763 }
764
765 {
766 /* Output for debug, this have to be removed */
767 int i;
768 const char *name[4] =
769 { "acquire", "acquired", "contended", "release" };
770
771 printf("\n=== output for debug===\n\n");
772 printf("bad:%d, total:%d\n", bad, total);
773 printf("bad rate:%f\n", (double)(bad / total));
774
775 printf("histogram of events caused bad sequence\n");
776 for (i = 0; i < 4; i++)
777 printf(" %10s: %d\n", name[i], bad_hist[i]);
778 }
676} 779}
677 780
678static void dump_map(void) 781static void dump_map(void)
@@ -687,13 +790,32 @@ static void dump_map(void)
687 } 790 }
688} 791}
689 792
793static int process_sample_event(event_t *self, struct perf_session *s)
794{
795 struct sample_data data;
796 struct thread *thread;
797
798 bzero(&data, sizeof(data));
799 event__parse_sample(self, s->sample_type, &data);
800
801 thread = perf_session__findnew(s, data.tid);
802 if (thread == NULL) {
803 pr_debug("problem processing %d event, skipping it.\n",
804 self->header.type);
805 return -1;
806 }
807
808 process_raw_event(data.raw_data, data.cpu, data.time, thread);
809
810 return 0;
811}
812
690static struct perf_event_ops eops = { 813static struct perf_event_ops eops = {
691 .sample = process_sample_event, 814 .sample = process_sample_event,
692 .comm = event__process_comm, 815 .comm = event__process_comm,
816 .ordered_samples = true,
693}; 817};
694 818
695static struct perf_session *session;
696
697static int read_events(void) 819static int read_events(void)
698{ 820{
699 session = perf_session__new(input_name, O_RDONLY, 0); 821 session = perf_session__new(input_name, O_RDONLY, 0);
@@ -720,7 +842,6 @@ static void __cmd_report(void)
720 setup_pager(); 842 setup_pager();
721 select_key(); 843 select_key();
722 read_events(); 844 read_events();
723 flush_raw_event_queue(ULLONG_MAX);
724 sort_result(); 845 sort_result();
725 print_result(); 846 print_result();
726} 847}
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 09ddc8e6d8e..94453f1e4e0 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1651,9 +1651,10 @@ static int process_lost_event(event_t *event __used,
1651} 1651}
1652 1652
1653static struct perf_event_ops event_ops = { 1653static struct perf_event_ops event_ops = {
1654 .sample = process_sample_event, 1654 .sample = process_sample_event,
1655 .comm = event__process_comm, 1655 .comm = event__process_comm,
1656 .lost = process_lost_event, 1656 .lost = process_lost_event,
1657 .ordered_samples = true,
1657}; 1658};
1658 1659
1659static int read_events(void) 1660static int read_events(void)
@@ -1850,7 +1851,6 @@ static const char *record_args[] = {
1850 "record", 1851 "record",
1851 "-a", 1852 "-a",
1852 "-R", 1853 "-R",
1853 "-M",
1854 "-f", 1854 "-f",
1855 "-m", "1024", 1855 "-m", "1024",
1856 "-c", "1", 1856 "-c", "1",
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 96f4a092df3..c35aa44f82b 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -143,9 +143,6 @@ struct wake_event {
143static struct power_event *power_events; 143static struct power_event *power_events;
144static struct wake_event *wake_events; 144static struct wake_event *wake_events;
145 145
146struct sample_wrapper *all_samples;
147
148
149struct process_filter; 146struct process_filter;
150struct process_filter { 147struct process_filter {
151 char *name; 148 char *name;
@@ -566,88 +563,6 @@ static void end_sample_processing(void)
566 } 563 }
567} 564}
568 565
569static u64 sample_time(event_t *event, const struct perf_session *session)
570{
571 int cursor;
572
573 cursor = 0;
574 if (session->sample_type & PERF_SAMPLE_IP)
575 cursor++;
576 if (session->sample_type & PERF_SAMPLE_TID)
577 cursor++;
578 if (session->sample_type & PERF_SAMPLE_TIME)
579 return event->sample.array[cursor];
580 return 0;
581}
582
583
584/*
585 * We first queue all events, sorted backwards by insertion.
586 * The order will get flipped later.
587 */
588static int queue_sample_event(event_t *event, struct perf_session *session)
589{
590 struct sample_wrapper *copy, *prev;
591 int size;
592
593 size = event->sample.header.size + sizeof(struct sample_wrapper) + 8;
594
595 copy = malloc(size);
596 if (!copy)
597 return 1;
598
599 memset(copy, 0, size);
600
601 copy->next = NULL;
602 copy->timestamp = sample_time(event, session);
603
604 memcpy(&copy->data, event, event->sample.header.size);
605
606 /* insert in the right place in the list */
607
608 if (!all_samples) {
609 /* first sample ever */
610 all_samples = copy;
611 return 0;
612 }
613
614 if (all_samples->timestamp < copy->timestamp) {
615 /* insert at the head of the list */
616 copy->next = all_samples;
617 all_samples = copy;
618 return 0;
619 }
620
621 prev = all_samples;
622 while (prev->next) {
623 if (prev->next->timestamp < copy->timestamp) {
624 copy->next = prev->next;
625 prev->next = copy;
626 return 0;
627 }
628 prev = prev->next;
629 }
630 /* insert at the end of the list */
631 prev->next = copy;
632
633 return 0;
634}
635
636static void sort_queued_samples(void)
637{
638 struct sample_wrapper *cursor, *next;
639
640 cursor = all_samples;
641 all_samples = NULL;
642
643 while (cursor) {
644 next = cursor->next;
645 cursor->next = all_samples;
646 all_samples = cursor;
647 cursor = next;
648 }
649}
650
651/* 566/*
652 * Sort the pid datastructure 567 * Sort the pid datastructure
653 */ 568 */
@@ -1011,26 +926,12 @@ static void write_svg_file(const char *filename)
1011 svg_close(); 926 svg_close();
1012} 927}
1013 928
1014static void process_samples(struct perf_session *session)
1015{
1016 struct sample_wrapper *cursor;
1017 event_t *event;
1018
1019 sort_queued_samples();
1020
1021 cursor = all_samples;
1022 while (cursor) {
1023 event = (void *)&cursor->data;
1024 cursor = cursor->next;
1025 process_sample_event(event, session);
1026 }
1027}
1028
1029static struct perf_event_ops event_ops = { 929static struct perf_event_ops event_ops = {
1030 .comm = process_comm_event, 930 .comm = process_comm_event,
1031 .fork = process_fork_event, 931 .fork = process_fork_event,
1032 .exit = process_exit_event, 932 .exit = process_exit_event,
1033 .sample = queue_sample_event, 933 .sample = process_sample_event,
934 .ordered_samples = true,
1034}; 935};
1035 936
1036static int __cmd_timechart(void) 937static int __cmd_timechart(void)
@@ -1048,8 +949,6 @@ static int __cmd_timechart(void)
1048 if (ret) 949 if (ret)
1049 goto out_delete; 950 goto out_delete;
1050 951
1051 process_samples(session);
1052
1053 end_sample_processing(); 952 end_sample_processing();
1054 953
1055 sort_pids(); 954 sort_pids();
@@ -1072,7 +971,6 @@ static const char *record_args[] = {
1072 "record", 971 "record",
1073 "-a", 972 "-a",
1074 "-R", 973 "-R",
1075 "-M",
1076 "-f", 974 "-f",
1077 "-c", "1", 975 "-c", "1",
1078 "-e", "power:power_start", 976 "-e", "power:power_start",
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 2eefb33c967..77f556f7604 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -11,6 +11,8 @@
11 11
12static char const *script_name; 12static char const *script_name;
13static char const *generate_script_lang; 13static char const *generate_script_lang;
14static bool debug_ordering;
15static u64 last_timestamp;
14 16
15static int default_start_script(const char *script __unused, 17static int default_start_script(const char *script __unused,
16 int argc __unused, 18 int argc __unused,
@@ -87,6 +89,14 @@ static int process_sample_event(event_t *event, struct perf_session *session)
87 } 89 }
88 90
89 if (session->sample_type & PERF_SAMPLE_RAW) { 91 if (session->sample_type & PERF_SAMPLE_RAW) {
92 if (debug_ordering) {
93 if (data.time < last_timestamp) {
94 pr_err("Samples misordered, previous: %llu "
95 "this: %llu\n", last_timestamp,
96 data.time);
97 }
98 last_timestamp = data.time;
99 }
90 /* 100 /*
91 * FIXME: better resolve from pid from the struct trace_entry 101 * FIXME: better resolve from pid from the struct trace_entry
92 * field, although it should be the same than this perf 102 * field, although it should be the same than this perf
@@ -108,6 +118,7 @@ static struct perf_event_ops event_ops = {
108 .event_type = event__process_event_type, 118 .event_type = event__process_event_type,
109 .tracing_data = event__process_tracing_data, 119 .tracing_data = event__process_tracing_data,
110 .build_id = event__process_build_id, 120 .build_id = event__process_build_id,
121 .ordered_samples = true,
111}; 122};
112 123
113extern volatile int session_done; 124extern volatile int session_done;
@@ -531,6 +542,8 @@ static const struct option options[] = {
531 "generate perf-trace.xx script in specified language"), 542 "generate perf-trace.xx script in specified language"),
532 OPT_STRING('i', "input", &input_name, "file", 543 OPT_STRING('i', "input", &input_name, "file",
533 "input file name"), 544 "input file name"),
545 OPT_BOOLEAN('d', "debug-ordering", &debug_ordering,
546 "check that samples time ordering is monotonic"),
534 547
535 OPT_END() 548 OPT_END()
536}; 549};
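
As a usage note for the option added above (assuming the command is still invoked as 'perf trace'): running e.g. 'perf trace -d -i perf.data' replays the file with the check enabled, and pr_err() flags any sample whose timestamp is lower than the one delivered before it; with .ordered_samples set this should stay silent.
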
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 3b4ec679756..600d3271425 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -418,12 +418,6 @@ parse_single_tracepoint_event(char *sys_name,
418 u64 id; 418 u64 id;
419 int fd; 419 int fd;
420 420
421 attr->sample_type |= PERF_SAMPLE_RAW;
422 attr->sample_type |= PERF_SAMPLE_TIME;
423 attr->sample_type |= PERF_SAMPLE_CPU;
424
425 attr->sample_period = 1;
426
427 snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path, 421 snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
428 sys_name, evt_name); 422 sys_name, evt_name);
429 423
@@ -442,6 +436,13 @@ parse_single_tracepoint_event(char *sys_name,
442 attr->type = PERF_TYPE_TRACEPOINT; 436 attr->type = PERF_TYPE_TRACEPOINT;
443 *strp = evt_name + evt_length; 437 *strp = evt_name + evt_length;
444 438
439 attr->sample_type |= PERF_SAMPLE_RAW;
440 attr->sample_type |= PERF_SAMPLE_TIME;
441 attr->sample_type |= PERF_SAMPLE_CPU;
442
443 attr->sample_period = 1;
444
445
445 return EVT_HANDLED; 446 return EVT_HANDLED;
446} 447}
447 448
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 7d88ae5c270..b7aade2184b 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -98,6 +98,8 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc
98 self->cwdlen = 0; 98 self->cwdlen = 0;
99 self->unknown_events = 0; 99 self->unknown_events = 0;
100 self->kerninfo_root = RB_ROOT; 100 self->kerninfo_root = RB_ROOT;
101 self->ordered_samples.flush_limit = ULLONG_MAX;
102 INIT_LIST_HEAD(&self->ordered_samples.samples_head);
101 103
102 if (mode == O_RDONLY) { 104 if (mode == O_RDONLY) {
103 if (perf_session__open(self, force) < 0) 105 if (perf_session__open(self, force) < 0)
@@ -351,6 +353,178 @@ static event__swap_op event__swap_ops[] = {
351 [PERF_RECORD_HEADER_MAX] = NULL, 353 [PERF_RECORD_HEADER_MAX] = NULL,
352}; 354};
353 355
356struct sample_queue {
357 u64 timestamp;
358 struct sample_event *event;
359 struct list_head list;
360};
361
362#define FLUSH_PERIOD (2 * NSEC_PER_SEC)
363
364static void flush_sample_queue(struct perf_session *s,
365 struct perf_event_ops *ops)
366{
367 struct list_head *head = &s->ordered_samples.samples_head;
368 u64 limit = s->ordered_samples.flush_limit;
369 struct sample_queue *tmp, *iter;
370
371 if (!ops->ordered_samples)
372 return;
373
374 list_for_each_entry_safe(iter, tmp, head, list) {
375 if (iter->timestamp > limit)
376 return;
377
378 if (iter == s->ordered_samples.last_inserted)
379 s->ordered_samples.last_inserted = NULL;
380
381 ops->sample((event_t *)iter->event, s);
382
383 s->ordered_samples.last_flush = iter->timestamp;
384 list_del(&iter->list);
385 free(iter->event);
386 free(iter);
387 }
388}
389
390static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
391{
392 struct sample_queue *iter;
393
394 list_for_each_entry_reverse(iter, head, list) {
395 if (iter->timestamp < new->timestamp) {
396 list_add(&new->list, &iter->list);
397 return;
398 }
399 }
400
401 list_add(&new->list, head);
402}
403
404static void __queue_sample_before(struct sample_queue *new,
405 struct sample_queue *iter,
406 struct list_head *head)
407{
408 list_for_each_entry_continue_reverse(iter, head, list) {
409 if (iter->timestamp < new->timestamp) {
410 list_add(&new->list, &iter->list);
411 return;
412 }
413 }
414
415 list_add(&new->list, head);
416}
417
418static void __queue_sample_after(struct sample_queue *new,
419 struct sample_queue *iter,
420 struct list_head *head)
421{
422 list_for_each_entry_continue(iter, head, list) {
423 if (iter->timestamp > new->timestamp) {
424 list_add_tail(&new->list, &iter->list);
425 return;
426 }
427 }
428 list_add_tail(&new->list, head);
429}
430
431/* The queue is ordered by time */
432static void __queue_sample_event(struct sample_queue *new,
433 struct perf_session *s)
434{
435 struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
436 struct list_head *head = &s->ordered_samples.samples_head;
437
438
439 if (!last_inserted) {
440 __queue_sample_end(new, head);
441 return;
442 }
443
444 /*
445 * Most of the time the current event has a timestamp
446 * very close to the last event inserted, unless we just switched
447 * to another event buffer. Having a sorting based on a list and
448 * on the last inserted event that is close to the current one is
449 * probably more efficient than an rbtree based sorting.
450 */
451 if (last_inserted->timestamp >= new->timestamp)
452 __queue_sample_before(new, last_inserted, head);
453 else
454 __queue_sample_after(new, last_inserted, head);
455}
456
457static int queue_sample_event(event_t *event, struct sample_data *data,
458 struct perf_session *s,
459 struct perf_event_ops *ops)
460{
461 u64 timestamp = data->time;
462 struct sample_queue *new;
463 u64 flush_limit;
464
465
466 if (s->ordered_samples.flush_limit == ULLONG_MAX)
467 s->ordered_samples.flush_limit = timestamp + FLUSH_PERIOD;
468
469 if (timestamp < s->ordered_samples.last_flush) {
470 printf("Warning: Timestamp below last timeslice flush\n");
471 return -EINVAL;
472 }
473
474 new = malloc(sizeof(*new));
475 if (!new)
476 return -ENOMEM;
477
478 new->timestamp = timestamp;
479
480 new->event = malloc(event->header.size);
481 if (!new->event) {
482 free(new);
483 return -ENOMEM;
484 }
485
486 memcpy(new->event, event, event->header.size);
487
488 __queue_sample_event(new, s);
489 s->ordered_samples.last_inserted = new;
490
491 /*
492 * We want to have a slice of events covering 2 * FLUSH_PERIOD
493 * If FLUSH_PERIOD is big enough, it ensures every events that occured
494 * in the first half of the timeslice have all been buffered and there
495 * are none remaining (we need that because of the weakly ordered
496 * event recording we have). Then once we reach the 2 * FLUSH_PERIOD
497 * timeslice, we flush the first half to be gentle with the memory
498 * (the second half can still get new events in the middle, so wait
499 * another period to flush it)
500 */
501 flush_limit = s->ordered_samples.flush_limit;
502
503 if (new->timestamp > flush_limit &&
504 new->timestamp - flush_limit > FLUSH_PERIOD) {
505 s->ordered_samples.flush_limit += FLUSH_PERIOD;
506 flush_sample_queue(s, ops);
507 }
508
509 return 0;
510}
511
512static int perf_session__process_sample(event_t *event, struct perf_session *s,
513 struct perf_event_ops *ops)
514{
515 struct sample_data data;
516
517 if (!ops->ordered_samples)
518 return ops->sample(event, s);
519
520 bzero(&data, sizeof(struct sample_data));
521 event__parse_sample(event, s->sample_type, &data);
522
523 queue_sample_event(event, &data, s, ops);
524
525 return 0;
526}
527
354static int perf_session__process_event(struct perf_session *self, 528static int perf_session__process_event(struct perf_session *self,
355 event_t *event, 529 event_t *event,
356 struct perf_event_ops *ops, 530 struct perf_event_ops *ops,
@@ -371,7 +545,7 @@ static int perf_session__process_event(struct perf_session *self,
371 545
372 switch (event->header.type) { 546 switch (event->header.type) {
373 case PERF_RECORD_SAMPLE: 547 case PERF_RECORD_SAMPLE:
374 return ops->sample(event, self); 548 return perf_session__process_sample(event, self, ops);
375 case PERF_RECORD_MMAP: 549 case PERF_RECORD_MMAP:
376 return ops->mmap(event, self); 550 return ops->mmap(event, self);
377 case PERF_RECORD_COMM: 551 case PERF_RECORD_COMM:
@@ -611,6 +785,9 @@ more:
611 goto more; 785 goto more;
612done: 786done:
613 err = 0; 787 err = 0;
788 /* do the final flush for ordered samples */
789 self->ordered_samples.flush_limit = ULLONG_MAX;
790 flush_sample_queue(self, ops);
614out_err: 791out_err:
615 ui_progress__delete(progress); 792 ui_progress__delete(progress);
616 return err; 793 return err;
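
The flush window logic added above is easier to see with concrete numbers. A standalone sketch of the limit arithmetic; the comparisons mirror queue_sample_event(), while the program, timestamps and units are invented for illustration:

/* Standalone illustration, not perf code. FLUSH_PERIOD is taken as 2
 * "seconds" and the sample timestamps are invented; the real code works
 * in nanoseconds with 2 * NSEC_PER_SEC.
 */
#include <stdio.h>

#define FLUSH_PERIOD 2ULL

int main(void)
{
	unsigned long long flush_limit = ~0ULL;	/* ULLONG_MAX at init */
	unsigned long long samples[] = { 10, 11, 9, 12, 15, 14, 17 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned long long ts = samples[i];

		if (flush_limit == ~0ULL)
			flush_limit = ts + FLUSH_PERIOD;	/* 10 + 2 = 12 */

		/* ... sample is inserted into the sorted queue here ... */

		if (ts > flush_limit && ts - flush_limit > FLUSH_PERIOD) {
			flush_limit += FLUSH_PERIOD;
			/* first trigger: ts=15, new limit 14, so the queued
			 * samples 9, 10, 11, 12 are replayed in order;
			 * second trigger: ts=17, new limit 16, replays 14, 15 */
			printf("ts=%llu: flush queued samples <= %llu\n",
			       ts, flush_limit);
		}
	}
	/* the final flush with limit = ULLONG_MAX drains what is left (17) */
	return 0;
}
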
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 5e47c87b926..796e2291ebd 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -8,9 +8,17 @@
8#include <linux/rbtree.h> 8#include <linux/rbtree.h>
9#include "../../../include/linux/perf_event.h" 9#include "../../../include/linux/perf_event.h"
10 10
11struct sample_queue;
11struct ip_callchain; 12struct ip_callchain;
12struct thread; 13struct thread;
13 14
15struct ordered_samples {
16 u64 last_flush;
17 u64 flush_limit;
18 struct list_head samples_head;
19 struct sample_queue *last_inserted;
20};
21
14struct perf_session { 22struct perf_session {
15 struct perf_header header; 23 struct perf_header header;
16 unsigned long size; 24 unsigned long size;
@@ -28,6 +36,7 @@ struct perf_session {
28 bool fd_pipe; 36 bool fd_pipe;
29 int cwdlen; 37 int cwdlen;
30 char *cwd; 38 char *cwd;
39 struct ordered_samples ordered_samples;
31 char filename[0]; 40 char filename[0];
32}; 41};
33 42
@@ -47,6 +56,7 @@ struct perf_event_ops {
47 event_type, 56 event_type,
48 tracing_data, 57 tracing_data,
49 build_id; 58 build_id;
59 bool ordered_samples;
50}; 60};
51 61
52struct perf_session *perf_session__new(const char *filename, int mode, bool force); 62struct perf_session *perf_session__new(const char *filename, int mode, bool force);
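
Taken together, a builtin now opts into ordered delivery with a single flag. A minimal sketch of the pattern the tools in this series use (the handler body is a placeholder):

/* Sketch of the opt-in pattern. PERF_RECORD_SAMPLE events are routed
 * through perf_session__process_sample(), queued by timestamp, and
 * replayed to .sample() in order as the flush window advances.
 */
static int process_sample_event(event_t *event, struct perf_session *s)
{
	/* samples arrive here already sorted by their time field */
	return 0;
}

static struct perf_event_ops event_ops = {
	.sample		 = process_sample_event,
	.comm		 = event__process_comm,
	.ordered_samples = true,	/* enables the queue in util/session.c */
};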