Diffstat (limited to 'tools/perf/builtin-sched.c')
 tools/perf/builtin-sched.c | 351
 1 file changed, 146 insertions(+), 205 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index ce2d5be4f30e..26b782f26ee1 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -11,6 +11,7 @@
 #include "util/trace-event.h"
 
 #include "util/debug.h"
+#include "util/data_map.h"
 
 #include <sys/types.h>
 #include <sys/prctl.h>
@@ -20,14 +21,6 @@
 #include <math.h>
 
 static char const		*input_name = "perf.data";
-static int			input;
-static unsigned long		page_size;
-static unsigned long		mmap_window = 32;
-
-static unsigned long		total_comm = 0;
-
-static struct rb_root		threads;
-static struct thread		*last_match;
 
 static struct perf_header	*header;
 static u64			sample_type;
@@ -35,11 +28,11 @@ static u64 sample_type;
 static char			default_sort_order[] = "avg, max, switch, runtime";
 static char			*sort_order = default_sort_order;
 
+static int			profile_cpu = -1;
+
 #define PR_SET_NAME		15               /* Set process name */
 #define MAX_CPUS		4096
 
-#define BUG_ON(x)		assert(!(x))
-
 static u64			run_measurement_overhead;
 static u64			sleep_measurement_overhead;
 
@@ -74,6 +67,7 @@ enum sched_event_type {
 	SCHED_EVENT_RUN,
 	SCHED_EVENT_SLEEP,
 	SCHED_EVENT_WAKEUP,
+	SCHED_EVENT_MIGRATION,
 };
 
 struct sched_atom {
@@ -226,7 +220,7 @@ static void calibrate_sleep_measurement_overhead(void)
 static struct sched_atom *
 get_new_event(struct task_desc *task, u64 timestamp)
 {
-	struct sched_atom *event = calloc(1, sizeof(*event));
+	struct sched_atom *event = zalloc(sizeof(*event));
 	unsigned long idx = task->nr_events;
 	size_t size;
 
@@ -294,7 +288,7 @@ add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
 		return;
 	}
 
-	wakee_event->wait_sem = calloc(1, sizeof(*wakee_event->wait_sem));
+	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
 	sem_init(wakee_event->wait_sem, 0, 0);
 	wakee_event->specific_wait = 1;
 	event->wait_sem = wakee_event->wait_sem;
@@ -324,7 +318,7 @@ static struct task_desc *register_pid(unsigned long pid, const char *comm)
 	if (task)
 		return task;
 
-	task = calloc(1, sizeof(*task));
+	task = zalloc(sizeof(*task));
 	task->pid = pid;
 	task->nr = nr_tasks;
 	strcpy(task->comm, comm);
@@ -398,6 +392,8 @@ process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
 			ret = sem_post(atom->wait_sem);
 		BUG_ON(ret);
 		break;
+	case SCHED_EVENT_MIGRATION:
+		break;
 	default:
 		BUG_ON(1);
 	}
@@ -632,29 +628,6 @@ static void test_calibrations(void)
 	printf("the sleep test took %Ld nsecs\n", T1-T0);
 }
 
-static int
-process_comm_event(event_t *event, unsigned long offset, unsigned long head)
-{
-	struct thread *thread;
-
-	thread = threads__findnew(event->comm.pid, &threads, &last_match);
-
-	dump_printf("%p [%p]: perf_event_comm: %s:%d\n",
-		(void *)(offset + head),
-		(void *)(long)(event->header.size),
-		event->comm.comm, event->comm.pid);
-
-	if (thread == NULL ||
-	    thread__set_comm(thread, event->comm.comm)) {
-		dump_printf("problem processing perf_event_comm, skipping event.\n");
-		return -1;
-	}
-	total_comm++;
-
-	return 0;
-}
-
-
 struct raw_event_sample {
 	u32 size;
 	char data[0];
@@ -745,6 +718,22 @@ struct trace_fork_event {
 	u32 child_pid;
 };
 
+struct trace_migrate_task_event {
+	u32 size;
+
+	u16 common_type;
+	u8 common_flags;
+	u8 common_preempt_count;
+	u32 common_pid;
+	u32 common_tgid;
+
+	char comm[16];
+	u32 pid;
+
+	u32 prio;
+	u32 cpu;
+};
+
 struct trace_sched_handler {
 	void (*switch_event)(struct trace_switch_event *,
 			     struct event *,
@@ -769,6 +758,12 @@ struct trace_sched_handler {
 			   int cpu,
 			   u64 timestamp,
 			   struct thread *thread);
+
+	void (*migrate_task_event)(struct trace_migrate_task_event *,
+				   struct event *,
+				   int cpu,
+				   u64 timestamp,
+				   struct thread *thread);
 };
 
 
@@ -941,9 +936,7 @@ __thread_latency_insert(struct rb_root *root, struct work_atoms *data,
 
 static void thread_atoms_insert(struct thread *thread)
 {
-	struct work_atoms *atoms;
-
-	atoms = calloc(sizeof(*atoms), 1);
+	struct work_atoms *atoms = zalloc(sizeof(*atoms));
 	if (!atoms)
 		die("No memory");
 
@@ -975,9 +968,7 @@ add_sched_out_event(struct work_atoms *atoms,
 		    char run_state,
 		    u64 timestamp)
 {
-	struct work_atom *atom;
-
-	atom = calloc(sizeof(*atom), 1);
+	struct work_atom *atom = zalloc(sizeof(*atom));
 	if (!atom)
 		die("Non memory");
 
@@ -1058,8 +1049,8 @@ latency_switch_event(struct trace_switch_event *switch_event,
 		die("hm, delta: %Ld < 0 ?\n", delta);
 
 
-	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
-	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
+	sched_out = threads__findnew(switch_event->prev_pid);
+	sched_in = threads__findnew(switch_event->next_pid);
 
 	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
 	if (!out_events) {
@@ -1092,13 +1083,10 @@ latency_runtime_event(struct trace_runtime_event *runtime_event,
 		     u64 timestamp,
 		     struct thread *this_thread __used)
 {
-	struct work_atoms *atoms;
-	struct thread *thread;
+	struct thread *thread = threads__findnew(runtime_event->pid);
+	struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
 
 	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
-
-	thread = threads__findnew(runtime_event->pid, &threads, &last_match);
-	atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(thread);
 		atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
@@ -1125,7 +1113,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 	if (!wakeup_event->success)
 		return;
 
-	wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
+	wakee = threads__findnew(wakeup_event->pid);
 	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(wakee);
@@ -1139,7 +1127,12 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 
 	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
 
-	if (atom->state != THREAD_SLEEPING)
+	/*
+	 * You WILL be missing events if you've recorded only
+	 * one CPU, or are only looking at only one, so don't
+	 * make useless noise.
+	 */
+	if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
 		nr_state_machine_bugs++;
 
 	nr_timestamps++;
@@ -1152,11 +1145,51 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 	atom->wake_up_time = timestamp;
 }
 
+static void
+latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
+			   struct event *__event __used,
+			   int cpu __used,
+			   u64 timestamp,
+			   struct thread *thread __used)
+{
+	struct work_atoms *atoms;
+	struct work_atom *atom;
+	struct thread *migrant;
+
+	/*
+	 * Only need to worry about migration when profiling one CPU.
+	 */
+	if (profile_cpu == -1)
+		return;
+
+	migrant = threads__findnew(migrate_task_event->pid);
+	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
+	if (!atoms) {
+		thread_atoms_insert(migrant);
+		register_pid(migrant->pid, migrant->comm);
+		atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
+		if (!atoms)
+			die("migration-event: Internal tree error");
+		add_sched_out_event(atoms, 'R', timestamp);
+	}
+
+	BUG_ON(list_empty(&atoms->work_list));
+
+	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
+	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
+
+	nr_timestamps++;
+
+	if (atom->sched_out_time > timestamp)
+		nr_unordered_timestamps++;
+}
+
 static struct trace_sched_handler lat_ops  = {
 	.wakeup_event		= latency_wakeup_event,
 	.switch_event		= latency_switch_event,
 	.runtime_event		= latency_runtime_event,
 	.fork_event		= latency_fork_event,
+	.migrate_task_event	= latency_migrate_task_event,
 };
 
 static void output_lat_thread(struct work_atoms *work_list)
@@ -1385,8 +1418,8 @@ map_switch_event(struct trace_switch_event *switch_event,
 		die("hm, delta: %Ld < 0 ?\n", delta);
 
 
-	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
-	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
+	sched_out = threads__findnew(switch_event->prev_pid);
+	sched_in = threads__findnew(switch_event->next_pid);
 
 	curr_thread[this_cpu] = sched_in;
 
@@ -1517,6 +1550,26 @@ process_sched_exit_event(struct event *event,
 }
 
 static void
+process_sched_migrate_task_event(struct raw_event_sample *raw,
+				 struct event *event,
+				 int cpu __used,
+				 u64 timestamp __used,
+				 struct thread *thread __used)
+{
+	struct trace_migrate_task_event migrate_task_event;
+
+	FILL_COMMON_FIELDS(migrate_task_event, event, raw->data);
+
+	FILL_ARRAY(migrate_task_event, comm, event, raw->data);
+	FILL_FIELD(migrate_task_event, pid, event, raw->data);
+	FILL_FIELD(migrate_task_event, prio, event, raw->data);
+	FILL_FIELD(migrate_task_event, cpu, event, raw->data);
+
+	if (trace_handler->migrate_task_event)
+		trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread);
+}
+
+static void
 process_raw_event(event_t *raw_event __used, void *more_data,
 		  int cpu, u64 timestamp, struct thread *thread)
 {
@@ -1539,23 +1592,23 @@ process_raw_event(event_t *raw_event __used, void *more_data,
 		process_sched_fork_event(raw, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_process_exit"))
 		process_sched_exit_event(event, cpu, timestamp, thread);
+	if (!strcmp(event->name, "sched_migrate_task"))
+		process_sched_migrate_task_event(raw, event, cpu, timestamp, thread);
 }
 
-static int
-process_sample_event(event_t *event, unsigned long offset, unsigned long head)
+static int process_sample_event(event_t *event)
 {
-	char level;
-	int show = 0;
-	struct dso *dso = NULL;
 	struct thread *thread;
 	u64 ip = event->ip.ip;
 	u64 timestamp = -1;
 	u32 cpu = -1;
 	u64 period = 1;
 	void *more_data = event->ip.__more_data;
-	int cpumode;
 
-	thread = threads__findnew(event->ip.pid, &threads, &last_match);
+	if (!(sample_type & PERF_SAMPLE_RAW))
+		return 0;
+
+	thread = threads__findnew(event->ip.pid);
 
 	if (sample_type & PERF_SAMPLE_TIME) {
 		timestamp = *(u64 *)more_data;
@@ -1573,177 +1626,64 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 		more_data += sizeof(u64);
 	}
 
-	dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
-		(void *)(offset + head),
-		(void *)(long)(event->header.size),
+	dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
 		event->header.misc,
 		event->ip.pid, event->ip.tid,
 		(void *)(long)ip,
 		(long long)period);
 
-	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
-
 	if (thread == NULL) {
-		eprintf("problem processing %d event, skipping it.\n",
+		pr_debug("problem processing %d event, skipping it.\n",
 			event->header.type);
 		return -1;
 	}
 
-	cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
-
-	if (cpumode == PERF_RECORD_MISC_KERNEL) {
-		show = SHOW_KERNEL;
-		level = 'k';
-
-		dso = kernel_dso;
-
-		dump_printf(" ...... dso: %s\n", dso->name);
-
-	} else if (cpumode == PERF_RECORD_MISC_USER) {
-
-		show = SHOW_USER;
-		level = '.';
-
-	} else {
-		show = SHOW_HV;
-		level = 'H';
-
-		dso = hypervisor_dso;
+	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
 
-		dump_printf(" ...... dso: [hypervisor]\n");
-	}
+	if (profile_cpu != -1 && profile_cpu != (int) cpu)
+		return 0;
 
-	if (sample_type & PERF_SAMPLE_RAW)
-		process_raw_event(event, more_data, cpu, timestamp, thread);
+	process_raw_event(event, more_data, cpu, timestamp, thread);
 
 	return 0;
 }
 
-static int
-process_event(event_t *event, unsigned long offset, unsigned long head)
+static int process_lost_event(event_t *event __used)
 {
-	trace_event(event);
-
-	nr_events++;
-	switch (event->header.type) {
-	case PERF_RECORD_MMAP:
-		return 0;
-	case PERF_RECORD_LOST:
-		nr_lost_chunks++;
-		nr_lost_events += event->lost.lost;
-		return 0;
-
-	case PERF_RECORD_COMM:
-		return process_comm_event(event, offset, head);
+	nr_lost_chunks++;
+	nr_lost_events += event->lost.lost;
 
-	case PERF_RECORD_EXIT ... PERF_RECORD_READ:
-		return 0;
+	return 0;
+}
 
-	case PERF_RECORD_SAMPLE:
-		return process_sample_event(event, offset, head);
+static int sample_type_check(u64 type)
+{
+	sample_type = type;
 
-	case PERF_RECORD_MAX:
-	default:
+	if (!(sample_type & PERF_SAMPLE_RAW)) {
+		fprintf(stderr,
+			"No trace sample to read. Did you call perf record "
+			"without -R?");
 		return -1;
 	}
 
 	return 0;
 }
 
+static struct perf_file_handler file_handler = {
+	.process_sample_event	= process_sample_event,
+	.process_comm_event	= event__process_comm,
+	.process_lost_event	= process_lost_event,
+	.sample_type_check	= sample_type_check,
+};
+
 static int read_events(void)
 {
-	int ret, rc = EXIT_FAILURE;
-	unsigned long offset = 0;
-	unsigned long head = 0;
-	struct stat perf_stat;
-	event_t *event;
-	uint32_t size;
-	char *buf;
-
-	trace_report();
-	register_idle_thread(&threads, &last_match);
-
-	input = open(input_name, O_RDONLY);
-	if (input < 0) {
-		perror("failed to open file");
-		exit(-1);
-	}
-
-	ret = fstat(input, &perf_stat);
-	if (ret < 0) {
-		perror("failed to stat file");
-		exit(-1);
-	}
-
-	if (!perf_stat.st_size) {
-		fprintf(stderr, "zero-sized file, nothing to do!\n");
-		exit(0);
-	}
-	header = perf_header__read(input);
-	head = header->data_offset;
-	sample_type = perf_header__sample_type(header);
-
-	if (!(sample_type & PERF_SAMPLE_RAW))
-		die("No trace sample to read. Did you call perf record "
-		    "without -R?");
-
-	if (load_kernel() < 0) {
-		perror("failed to load kernel symbols");
-		return EXIT_FAILURE;
-	}
-
-remap:
-	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
-			   MAP_SHARED, input, offset);
-	if (buf == MAP_FAILED) {
-		perror("failed to mmap file");
-		exit(-1);
-	}
-
-more:
-	event = (event_t *)(buf + head);
-
-	size = event->header.size;
-	if (!size)
-		size = 8;
-
-	if (head + event->header.size >= page_size * mmap_window) {
-		unsigned long shift = page_size * (head / page_size);
-		int res;
-
-		res = munmap(buf, page_size * mmap_window);
-		assert(res == 0);
-
-		offset += shift;
-		head -= shift;
-		goto remap;
-	}
-
-	size = event->header.size;
-
-
-	if (!size || process_event(event, offset, head) < 0) {
-
-		/*
-		 * assume we lost track of the stream, check alignment, and
-		 * increment a single u64 in the hope to catch on again 'soon'.
-		 */
-
-		if (unlikely(head & 7))
-			head &= ~7ULL;
-
-		size = 8;
-	}
-
-	head += size;
-
-	if (offset + head < (unsigned long)perf_stat.st_size)
-		goto more;
-
-	rc = EXIT_SUCCESS;
-	close(input);
+	register_idle_thread();
+	register_perf_file_handler(&file_handler);
 
-	return rc;
+	return mmap_dispatch_perf_file(&header, input_name, 0, 0,
+				       &event__cwdlen, &event__cwd);
 }
 
 static void print_bad_events(void)
@@ -1883,6 +1823,8 @@ static const struct option latency_options[] = {
1883 "sort by key(s): runtime, switch, avg, max"), 1823 "sort by key(s): runtime, switch, avg, max"),
1884 OPT_BOOLEAN('v', "verbose", &verbose, 1824 OPT_BOOLEAN('v', "verbose", &verbose,
1885 "be more verbose (show symbol address, etc)"), 1825 "be more verbose (show symbol address, etc)"),
1826 OPT_INTEGER('C', "CPU", &profile_cpu,
1827 "CPU to profile on"),
1886 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, 1828 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1887 "dump raw trace in ASCII"), 1829 "dump raw trace in ASCII"),
1888 OPT_END() 1830 OPT_END()
@@ -1960,8 +1902,7 @@ static int __cmd_record(int argc, const char **argv)
 
 int cmd_sched(int argc, const char **argv, const char *prefix __used)
 {
-	symbol__init();
-	page_size = getpagesize();
+	symbol__init(0);
 
 	argc = parse_options(argc, argv, sched_options, sched_usage,
 			     PARSE_OPT_STOP_AT_NON_OPTION);