-rw-r--r--  arch/x86/Kconfig                          |  1
-rw-r--r--  arch/x86/kernel/apb_timer.c               | 14
-rw-r--r--  arch/x86/kernel/tsc.c                     |  4
-rw-r--r--  drivers/acpi/numa.c                       |  8
-rw-r--r--  include/trace/events/module.h             | 10
-rw-r--r--  init/Kconfig                              | 15
-rw-r--r--  kernel/futex.c                            |  7
-rw-r--r--  kernel/rcutiny.c                          |  3
-rw-r--r--  kernel/srcu.c                             | 15
-rw-r--r--  kernel/time/clocksource.c                 |  8
-rw-r--r--  kernel/time/timekeeping.c                 |  4
-rw-r--r--  kernel/trace/trace_syscalls.c             | 33
-rw-r--r--  tools/perf/Documentation/perf-record.txt  |  3
-rw-r--r--  tools/perf/builtin-record.c               | 12
-rw-r--r--  tools/perf/builtin-sched.c                | 18
-rw-r--r--  tools/perf/builtin-stat.c                 |  1
-rw-r--r--  tools/perf/builtin-top.c                  |  5
-rw-r--r--  tools/perf/perf.c                         |  2
18 files changed, 84 insertions, 79 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 47ae4a751a59..3ed5ad92b029 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2068,6 +2068,7 @@ config OLPC
2068 bool "One Laptop Per Child support" 2068 bool "One Laptop Per Child support"
2069 select GPIOLIB 2069 select GPIOLIB
2070 select OLPC_OPENFIRMWARE 2070 select OLPC_OPENFIRMWARE
2071 depends on !X86_64 && !X86_PAE
2071 ---help--- 2072 ---help---
2072 Add support for detecting the unique features of the OLPC 2073 Add support for detecting the unique features of the OLPC
2073 XO hardware. 2074 XO hardware.
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 7c9ab59653e8..51ef31a89be9 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -313,14 +313,16 @@ static void apbt_setup_irq(struct apbt_dev *adev)
 	if (adev->irq == 0)
 		return;
 
+	irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
+	irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
+	/* APB timer irqs are set up as mp_irqs, timer is edge type */
+	__set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
+
 	if (system_state == SYSTEM_BOOTING) {
-		irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
-		irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
-		/* APB timer irqs are set up as mp_irqs, timer is edge type */
-		__set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
 		if (request_irq(adev->irq, apbt_interrupt_handler,
-				IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
-				adev->name, adev)) {
+				IRQF_TIMER | IRQF_DISABLED |
+				IRQF_NOBALANCING,
+				adev->name, adev)) {
 			printk(KERN_ERR "Failed request IRQ for APBT%d\n",
 			       adev->num);
 		}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 823f79a17ad1..ffe5755caa8b 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -464,7 +464,7 @@ unsigned long native_calibrate_tsc(void)
 		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
 
 		/* hpet or pmtimer available ? */
-		if (!hpet && !ref1 && !ref2)
+		if (ref1 == ref2)
 			continue;
 
 		/* Check, whether the sampling was disturbed by an SMI */
@@ -935,7 +935,7 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 	tsc_stop = tsc_read_refs(&ref_stop, hpet);
 
 	/* hpet or pmtimer available ? */
-	if (!hpet && !ref_start && !ref_stop)
+	if (ref_start == ref_stop)
 		goto out;
 
 	/* Check, whether the sampling was disturbed by an SMI */
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index d9926afec110..5eb25eb3ea48 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -275,23 +275,19 @@ acpi_table_parse_srat(enum acpi_srat_type id,
 int __init acpi_numa_init(void)
 {
 	int ret = 0;
-	int nr_cpu_entries = nr_cpu_ids;
 
-#ifdef CONFIG_X86
 	/*
 	 * Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
 	 * SRAT cpu entries could have different order with that in MADT.
 	 * So go over all cpu entries in SRAT to get apicid to node mapping.
 	 */
-	nr_cpu_entries = MAX_LOCAL_APIC;
-#endif
 
 	/* SRAT: Static Resource Affinity Table */
 	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
 		acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
-				      acpi_parse_x2apic_affinity, nr_cpu_entries);
+				      acpi_parse_x2apic_affinity, 0);
 		acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
-				      acpi_parse_processor_affinity, nr_cpu_entries);
+				      acpi_parse_processor_affinity, 0);
 		ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
 					    acpi_parse_memory_affinity,
 					    NR_NODE_MEMBLKS);
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index c7bb2f0482fe..c6bae36547e5 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -1,5 +1,15 @@
+/*
+ * Because linux/module.h has tracepoints in the header, and ftrace.h
+ * eventually includes this file, define_trace.h includes linux/module.h
+ * But we do not want the module.h to override the TRACE_SYSTEM macro
+ * variable that define_trace.h is processing, so we only set it
+ * when module events are being processed, which would happen when
+ * CREATE_TRACE_POINTS is defined.
+ */
+#ifdef CREATE_TRACE_POINTS
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM module
+#endif
 
 #if !defined(_TRACE_MODULE_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_MODULE_H
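For readers unfamiliar with the trick the new comment describes, the
convention it relies on is that exactly one compilation unit defines
CREATE_TRACE_POINTS before including the trace header. A minimal sketch
of the consuming side, following the usual tracepoint pattern:

	/* In the single .c file that instantiates these tracepoints
	 * (kernel/module.c for this header), the include is preceded by: */
	#define CREATE_TRACE_POINTS
	#include <trace/events/module.h>

	/* Any other path that reaches this header (e.g. through
	 * linux/module.h and ftrace.h) sees CREATE_TRACE_POINTS
	 * undefined, so the TRACE_SYSTEM value that define_trace.h is
	 * currently processing is left alone. */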
diff --git a/init/Kconfig b/init/Kconfig
index 4f6cdbf523eb..4e337906016e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -515,21 +515,6 @@ config RCU_BOOST_DELAY
 
 	  Accept the default if unsure.
 
-config SRCU_SYNCHRONIZE_DELAY
-	int "Microseconds to delay before waiting for readers"
-	range 0 20
-	default 10
-	help
-	  This option controls how long SRCU delays before entering its
-	  loop waiting on SRCU readers.  The purpose of this loop is
-	  to avoid the unconditional context-switch penalty that would
-	  otherwise be incurred if there was an active SRCU reader,
-	  in a manner similar to adaptive locking schemes.  This should
-	  be set to be a bit longer than the common-case SRCU read-side
-	  critical-section overhead.
-
-	  Accept the default if unsure.
-
 endmenu # "RCU Subsystem"
 
 config IKCONFIG
diff --git a/kernel/futex.c b/kernel/futex.c
index 52075633373f..b766d28accd6 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -826,10 +826,9 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
 
 	/*
-	 * This happens when we have stolen the lock and the original
-	 * pending owner did not enqueue itself back on the rt_mutex.
-	 * Thats not a tragedy. We know that way, that a lock waiter
-	 * is on the fly. We make the futex_q waiter the pending owner.
+	 * It is possible that the next waiter (the one that brought
+	 * this owner to the kernel) timed out and is no longer
+	 * waiting on the lock.
 	 */
 	if (!new_owner)
 		new_owner = this->task;
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 034493724749..0c343b9a46d5 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -189,7 +189,8 @@ static int rcu_kthread(void *arg)
 	unsigned long flags;
 
 	for (;;) {
-		wait_event(rcu_kthread_wq, have_rcu_kthread_work != 0);
+		wait_event_interruptible(rcu_kthread_wq,
+					 have_rcu_kthread_work != 0);
 		morework = rcu_boost();
 		local_irq_save(flags);
 		work = have_rcu_kthread_work;
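The wait_event_interruptible() form used above follows the common
kthread idiom: an interruptible sleep keeps an idle kthread from
counting toward the load average, and kthreads ignore signals, so no
extra handling is needed on wakeup. A minimal sketch with made-up
names ("my_wq", "my_work", "my_kthread" are illustrative):

	#include <linux/kthread.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(my_wq);	/* illustrative */
	static int my_work;			/* illustrative */

	static int my_kthread(void *arg)
	{
		for (;;) {
			/* Interruptible sleep: an idle kthread stays out
			 * of the load average, and the condition is
			 * rechecked on every wakeup, so spurious wakeups
			 * are harmless. */
			wait_event_interruptible(my_wq, my_work != 0);
			/* ... consume my_work, then loop ... */
		}
		return 0;
	}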
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 98d8c1e80edb..73ce23feaea9 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -156,6 +156,16 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
 /*
+ * We use an adaptive strategy for synchronize_srcu() and especially for
+ * synchronize_srcu_expedited().  We spin for a fixed time period
+ * (defined below) to allow SRCU readers to exit their read-side critical
+ * sections.  If there are still some readers after 10 microseconds,
+ * we repeatedly block for 1-millisecond time periods.  This approach
+ * has done well in testing, so there is no need for a config parameter.
+ */
+#define SYNCHRONIZE_SRCU_READER_DELAY 10
+
+/*
  * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
  */
 static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
@@ -207,11 +217,12 @@ static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
 	 * will have finished executing.  We initially give readers
 	 * an arbitrarily chosen 10 microseconds to get out of their
 	 * SRCU read-side critical sections, then loop waiting 1/HZ
-	 * seconds per iteration.
+	 * seconds per iteration.  The 10-microsecond value has done
+	 * very well in testing.
 	 */
 
 	if (srcu_readers_active_idx(sp, idx))
-		udelay(CONFIG_SRCU_SYNCHRONIZE_DELAY);
+		udelay(SYNCHRONIZE_SRCU_READER_DELAY);
 	while (srcu_readers_active_idx(sp, idx))
 		schedule_timeout_interruptible(1);
 
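The new comment block describes a general adaptive-wait technique: spin
briefly because readers usually finish fast, then fall back to blocking.
A self-contained user-space sketch of the same idea; all names and
constants here are illustrative, not the kernel's implementation:

	#include <stdatomic.h>
	#include <time.h>

	#define READER_DELAY_SPINS 1000	/* stand-in for the 10 us budget */

	static void wait_for_readers(atomic_int *nreaders)
	{
		struct timespec one_ms = { .tv_sec = 0, .tv_nsec = 1000000 };
		int spins = READER_DELAY_SPINS;

		/* Fast path: readers usually drain within the spin budget,
		 * avoiding the unconditional context-switch cost of blocking. */
		while (atomic_load(nreaders) != 0 && spins-- > 0)
			;

		/* Slow path: block in ~1 ms steps, like the 1/HZ loop above. */
		while (atomic_load(nreaders) != 0)
			nanosleep(&one_ms, NULL);
	}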
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index c50a034de30f..6519cf62d9cd 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -113,7 +113,7 @@ EXPORT_SYMBOL_GPL(timecounter_cyc2time);
113 * @shift: pointer to shift variable 113 * @shift: pointer to shift variable
114 * @from: frequency to convert from 114 * @from: frequency to convert from
115 * @to: frequency to convert to 115 * @to: frequency to convert to
116 * @minsec: guaranteed runtime conversion range in seconds 116 * @maxsec: guaranteed runtime conversion range in seconds
117 * 117 *
118 * The function evaluates the shift/mult pair for the scaled math 118 * The function evaluates the shift/mult pair for the scaled math
119 * operations of clocksources and clockevents. 119 * operations of clocksources and clockevents.
@@ -122,7 +122,7 @@ EXPORT_SYMBOL_GPL(timecounter_cyc2time);
  * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
  * event @to is the counter frequency and @from is NSEC_PER_SEC.
  *
- * The @minsec conversion range argument controls the time frame in
+ * The @maxsec conversion range argument controls the time frame in
  * seconds which must be covered by the runtime conversion with the
  * calculated mult and shift factors. This guarantees that no 64bit
  * overflow happens when the input value of the conversion is
@@ -131,7 +131,7 @@ EXPORT_SYMBOL_GPL(timecounter_cyc2time);
  * factors.
  */
 void
-clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec)
+clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
 {
 	u64 tmp;
 	u32 sft, sftacc= 32;
@@ -140,7 +140,7 @@ clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec)
 	 * Calculate the shift factor which is limiting the conversion
 	 * range:
 	 */
-	tmp = ((u64)minsec * from) >> 32;
+	tmp = ((u64)maxsec * from) >> 32;
 	while (tmp) {
 		tmp >>=1;
 		sftacc--;
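As background for the @maxsec rename above: the mult/shift pair is
applied at runtime as ns = (cycles * mult) >> shift, and @maxsec bounds
how many seconds' worth of cycles the 64-bit product must accommodate
without overflow. A self-contained worked example with made-up numbers
for a hypothetical 10 MHz counter:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* For from = 10 MHz and to = NSEC_PER_SEC = 1 GHz, one
		 * cycle is 100 ns, so with shift = 24 a mult of
		 * 100 << 24 reproduces that exactly. */
		uint32_t mult = 100u << 24, shift = 24;
		uint64_t cycles = 123456;

		/* The runtime conversion the kernel-doc above describes: */
		uint64_t ns = ((uint64_t)cycles * mult) >> shift;

		/* Prints: 123456 cycles -> 12345600 ns */
		printf("%llu cycles -> %llu ns\n",
		       (unsigned long long)cycles, (unsigned long long)ns);
		return 0;
	}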
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 5536aaf3ba36..d27c7562902c 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -49,7 +49,7 @@ struct timekeeper {
 	u32	mult;
 };
 
-struct timekeeper timekeeper;
+static struct timekeeper timekeeper;
 
 /**
  * timekeeper_setup_internals - Set up internals to use clocksource clock.
@@ -164,7 +164,7 @@ static struct timespec total_sleep_time;
 /*
  * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
  */
-struct timespec raw_time;
+static struct timespec raw_time;
 
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index bac752f0cfb5..b706529b4fc7 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -23,9 +23,6 @@ static int syscall_exit_register(struct ftrace_event_call *event,
 static int syscall_enter_define_fields(struct ftrace_event_call *call);
 static int syscall_exit_define_fields(struct ftrace_event_call *call);
 
-/* All syscall exit events have the same fields */
-static LIST_HEAD(syscall_exit_fields);
-
 static struct list_head *
 syscall_get_enter_fields(struct ftrace_event_call *call)
 {
@@ -34,34 +31,28 @@ syscall_get_enter_fields(struct ftrace_event_call *call)
 	return &entry->enter_fields;
 }
 
-static struct list_head *
-syscall_get_exit_fields(struct ftrace_event_call *call)
-{
-	return &syscall_exit_fields;
-}
-
 struct trace_event_functions enter_syscall_print_funcs = {
 	.trace		= print_syscall_enter,
 };
 
 struct trace_event_functions exit_syscall_print_funcs = {
 	.trace		= print_syscall_exit,
 };
 
 struct ftrace_event_class event_class_syscall_enter = {
 	.system		= "syscalls",
 	.reg		= syscall_enter_register,
 	.define_fields	= syscall_enter_define_fields,
 	.get_fields	= syscall_get_enter_fields,
 	.raw_init	= init_syscall_trace,
 };
 
 struct ftrace_event_class event_class_syscall_exit = {
 	.system		= "syscalls",
 	.reg		= syscall_exit_register,
 	.define_fields	= syscall_exit_define_fields,
-	.get_fields	= syscall_get_exit_fields,
+	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
 	.raw_init	= init_syscall_trace,
 };
 
 extern unsigned long __start_syscalls_metadata[];
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 52462ae26455..e032716c839b 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -61,6 +61,9 @@ OPTIONS
 -r::
 --realtime=::
 	Collect data with this RT SCHED_FIFO priority.
+-D::
+--no-delay::
+	Collect data without buffering.
 -A::
 --append::
 	Append to the output file to do incremental profiling.
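With the hunk above applied, the new option reads like the surrounding
entries; a hypothetical invocation (not part of the patch):

	# Stream samples with a wakeup per event instead of buffering:
	$ perf record -D -a -e sched:sched_switch -- sleep 1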
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 7069bd3e90b3..df6064ad9bf2 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -49,6 +49,7 @@ static int pipe_output = 0;
 static const char	*output_name		= "perf.data";
 static int		group			= 0;
 static int		realtime_prio		= 0;
+static bool		nodelay			= false;
 static bool		raw_samples		= false;
 static bool		sample_id_all_avail	= true;
 static bool		system_wide		= false;
@@ -307,6 +308,11 @@ static void create_counter(struct perf_evsel *evsel, int cpu)
 		attr->sample_type |= PERF_SAMPLE_CPU;
 	}
 
+	if (nodelay) {
+		attr->watermark = 0;
+		attr->wakeup_events = 1;
+	}
+
 	attr->mmap = track;
 	attr->comm = track;
 	attr->inherit = !no_inherit;
@@ -331,9 +337,6 @@ try_again:
 		else if (err == ENODEV && cpu_list) {
 			die("No such device - did you specify"
 				" an out-of-range profile CPU?\n");
-		} else if (err == ENOENT) {
-			die("%s event is not supported. ",
-			    event_name(evsel));
 		} else if (err == EINVAL && sample_id_all_avail) {
 			/*
 			 * Old kernel, no attr->sample_id_type_all field
@@ -480,6 +483,7 @@ static void atexit_header(void)
 		process_buildids();
 		perf_header__write(&session->header, output, true);
 		perf_session__delete(session);
+		perf_evsel_list__delete();
 		symbol__exit();
 	}
 }
@@ -845,6 +849,8 @@ const struct option record_options[] = {
845 "record events on existing thread id"), 849 "record events on existing thread id"),
846 OPT_INTEGER('r', "realtime", &realtime_prio, 850 OPT_INTEGER('r', "realtime", &realtime_prio,
847 "collect data with this RT SCHED_FIFO priority"), 851 "collect data with this RT SCHED_FIFO priority"),
852 OPT_BOOLEAN('D', "no-delay", &nodelay,
853 "collect data without buffering"),
848 OPT_BOOLEAN('R', "raw-samples", &raw_samples, 854 OPT_BOOLEAN('R', "raw-samples", &raw_samples,
849 "collect raw sample records from all opened counters"), 855 "collect raw sample records from all opened counters"),
850 OPT_BOOLEAN('a', "all-cpus", &system_wide, 856 OPT_BOOLEAN('a', "all-cpus", &system_wide,
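The nodelay branch added to create_counter() maps onto two
perf_event_attr fields; a minimal sketch of just that configuration
("configure_no_delay" is an illustrative name, and the rest of the attr
setup is omitted):

	#include <linux/perf_event.h>

	static void configure_no_delay(struct perf_event_attr *attr)
	{
		/* watermark = 0 selects the wakeup_events counter rather
		 * than a byte watermark; wakeup_events = 1 then asks for
		 * a ring-buffer wakeup after every event, i.e. no
		 * buffering delay. */
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}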
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index abd4b8497bc4..29e7ffd85690 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1843,15 +1843,15 @@ static const char *record_args[] = {
1843 "-f", 1843 "-f",
1844 "-m", "1024", 1844 "-m", "1024",
1845 "-c", "1", 1845 "-c", "1",
1846 "-e", "sched:sched_switch:r", 1846 "-e", "sched:sched_switch",
1847 "-e", "sched:sched_stat_wait:r", 1847 "-e", "sched:sched_stat_wait",
1848 "-e", "sched:sched_stat_sleep:r", 1848 "-e", "sched:sched_stat_sleep",
1849 "-e", "sched:sched_stat_iowait:r", 1849 "-e", "sched:sched_stat_iowait",
1850 "-e", "sched:sched_stat_runtime:r", 1850 "-e", "sched:sched_stat_runtime",
1851 "-e", "sched:sched_process_exit:r", 1851 "-e", "sched:sched_process_exit",
1852 "-e", "sched:sched_process_fork:r", 1852 "-e", "sched:sched_process_fork",
1853 "-e", "sched:sched_wakeup:r", 1853 "-e", "sched:sched_wakeup",
1854 "-e", "sched:sched_migrate_task:r", 1854 "-e", "sched:sched_migrate_task",
1855}; 1855};
1856 1856
1857static int __cmd_record(int argc, const char **argv) 1857static int __cmd_record(int argc, const char **argv)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index c385a63ebfd1..0ff11d9b13be 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -743,6 +743,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
 out_free_fd:
 	list_for_each_entry(pos, &evsel_list, node)
 		perf_evsel__free_stat_priv(pos);
+	perf_evsel_list__delete();
 out:
 	thread_map__delete(threads);
 	threads = NULL;
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 6ce4042421bd..05344c6210ac 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1247,8 +1247,6 @@ try_again:
1247 die("Permission error - are you root?\n" 1247 die("Permission error - are you root?\n"
1248 "\t Consider tweaking" 1248 "\t Consider tweaking"
1249 " /proc/sys/kernel/perf_event_paranoid.\n"); 1249 " /proc/sys/kernel/perf_event_paranoid.\n");
1250 if (err == ENOENT)
1251 die("%s event is not supported. ", event_name(evsel));
1252 /* 1250 /*
1253 * If it's cycles then fall back to hrtimer 1251 * If it's cycles then fall back to hrtimer
1254 * based cpu-clock-tick sw counter, which 1252 * based cpu-clock-tick sw counter, which
@@ -1473,6 +1471,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
 			pos->attr.sample_period = default_interval;
 	}
 
+	sym_evsel = list_entry(evsel_list.next, struct perf_evsel, node);
+
 	symbol_conf.priv_size = (sizeof(struct sym_entry) +
 				 (nr_counters + 1) * sizeof(unsigned long));
 
@@ -1490,6 +1490,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
 out_free_fd:
 	list_for_each_entry(pos, &evsel_list, node)
 		perf_evsel__free_mmap(pos);
+	perf_evsel_list__delete();
 
 	return status;
 }
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 5b1ecd66bb36..595d0f4a7103 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -286,8 +286,6 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
 	status = p->fn(argc, argv, prefix);
 	exit_browser(status);
 
-	perf_evsel_list__delete();
-
 	if (status)
 		return status & 0xff;
 