-rw-r--r--   Documentation/lockdep-design.txt       |   6
-rw-r--r--   arch/x86/kernel/apic/x2apic_cluster.c  |   8
-rw-r--r--   arch/x86/kernel/apic/x2apic_phys.c     |   8
-rw-r--r--   arch/x86/kernel/efi.c                  |   2
-rw-r--r--   arch/x86/kernel/reboot.c               |  10
-rw-r--r--   arch/x86/kernel/vmi_32.c               |   2
-rw-r--r--   include/linux/ftrace_event.h           |   4
-rw-r--r--   include/linux/perf_counter.h           |   9
-rw-r--r--   include/trace/ftrace.h                 | 172
-rw-r--r--   kernel/lockdep_proc.c                  |   3
-rw-r--r--   kernel/perf_counter.c                  |  22
-rw-r--r--   kernel/rtmutex.c                       |   4
-rw-r--r--   kernel/trace/ring_buffer.c             |  15
-rw-r--r--   kernel/trace/trace.c                   |   1
-rw-r--r--   kernel/trace/trace.h                   |   4
-rw-r--r--   kernel/trace/trace_events_filter.c     |  20
-rwxr-xr-x   scripts/recordmcount.pl                |   9
-rw-r--r--   tools/perf/builtin-record.c            |   1

18 files changed, 234 insertions(+), 66 deletions(-)
diff --git a/Documentation/lockdep-design.txt b/Documentation/lockdep-design.txt
index e20d913d5914..abf768c681e2 100644
--- a/Documentation/lockdep-design.txt
+++ b/Documentation/lockdep-design.txt
@@ -30,9 +30,9 @@ State
 The validator tracks lock-class usage history into 4n + 1 separate state bits:
 
 - 'ever held in STATE context'
-- 'ever head as readlock in STATE context'
-- 'ever head with STATE enabled'
-- 'ever head as readlock with STATE enabled'
+- 'ever held as readlock in STATE context'
+- 'ever held with STATE enabled'
+- 'ever held as readlock with STATE enabled'
 
 Where STATE can be either one of (kernel/lockdep_states.h)
  - hardirq
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 2ed4e2bb3b32..a5371ec36776 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -17,11 +17,13 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
         return x2apic_enabled();
 }
 
-/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
-
+/*
+ * need to use more than cpu 0, because we need more vectors when
+ * MSI-X are used.
+ */
 static const struct cpumask *x2apic_target_cpus(void)
 {
-        return cpumask_of(0);
+        return cpu_online_mask;
 }
 
 /*
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 0b631c6a2e00..a8989aadc99a 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -27,11 +27,13 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
         return 0;
 }
 
-/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
-
+/*
+ * need to use more than cpu 0, because we need more vectors when
+ * MSI-X are used.
+ */
 static const struct cpumask *x2apic_target_cpus(void)
 {
-        return cpumask_of(0);
+        return cpu_online_mask;
 }
 
 static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
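
Both x2apic drivers stop pinning interrupts to the boot CPU for the same reason: vector space is per-CPU and finite, so a machine with several multi-queue MSI-X devices can run out when everything targets CPU 0. A back-of-the-envelope sketch of the budget (the constants and helper below are illustrative assumptions, not kernel code):

    /* Illustrative only: rough per-CPU interrupt vector budget on x86. */
    #define NR_IDT_VECTORS          256   /* IDT entries per CPU */
    #define NR_RESERVED_VECTORS      56   /* exceptions, IPIs, timer, ... (assumed) */

    static int max_device_vectors(int nr_target_cpus)
    {
            /* cpumask_of(0): nr_target_cpus == 1; cpu_online_mask: all CPUs */
            return nr_target_cpus * (NR_IDT_VECTORS - NR_RESERVED_VECTORS);
    }
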
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 19ccf6d0dccf..fe26ba3e3451 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -354,7 +354,7 @@ void __init efi_init(void)
          */
         c16 = tmp = early_ioremap(efi.systab->fw_vendor, 2);
         if (c16) {
-                for (i = 0; i < sizeof(vendor) && *c16; ++i)
+                for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
                         vendor[i] = *c16++;
                 vendor[i] = '\0';
         } else
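
The efi.c change fixes a classic off-by-one: the old loop could fill all sizeof(vendor) bytes and then write the terminating '\0' one element past the end of the array. Bounding the loop at sizeof(vendor) - 1 keeps the final vendor[i] store in range. A minimal userspace demonstration of the corrected pattern (array contents and size are made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned short src[] = { 'E', 'D', 'K', ' ', 'I', 'I', 0 };
            char vendor[4];                 /* deliberately tiny */
            const unsigned short *c16 = src;
            int i;

            /* copy at most sizeof(vendor) - 1 chars, reserving the last byte */
            for (i = 0; i < (int)sizeof(vendor) - 1 && *c16; ++i)
                    vendor[i] = *c16++;     /* naive UTF-16 -> ASCII */
            vendor[i] = '\0';               /* i <= sizeof(vendor) - 1: in range */

            printf("%s\n", vendor);         /* prints "EDK" */
            return 0;
    }
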
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 834c9da8bf9d..9eb897603705 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -405,7 +405,7 @@ EXPORT_SYMBOL(machine_real_restart);
 #endif /* CONFIG_X86_32 */
 
 /*
- * Apple MacBook5,2 (2009 MacBook) needs reboot=p
+ * Some Apple MacBook and MacBookPro's needs reboot=p to be able to reboot
  */
 static int __init set_pci_reboot(const struct dmi_system_id *d)
 {
@@ -426,6 +426,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
                         DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
                 },
         },
+        {        /* Handle problems with rebooting on Apple MacBookPro5,1 */
+                .callback = set_pci_reboot,
+                .ident = "Apple MacBookPro5,1",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,1"),
+                },
+        },
         { }
 };
 
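
The new table entry simply extends the existing MacBook5,2 quirk to the MacBookPro5,1. For context, a sketch of how such a table is consumed (the initcall name below is an assumption about the surrounding code, not part of this patch): dmi_check_system() invokes .callback for each entry whose .matches strings all hit, and set_pci_reboot() switches the reboot method to the PCI 0xCF9 style, the same effect as booting with reboot=p.

    /* Sketch; pci_reboot_init is an assumed name for the consuming initcall. */
    static int __init pci_reboot_init(void)
    {
            /* runs set_pci_reboot() for any matching entry in the table above */
            dmi_check_system(pci_reboot_dmi_table);
            return 0;
    }
    core_initcall(pci_reboot_init);
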
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index b263423fbe2a..95a7289e4b0c 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -441,7 +441,7 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
         ap.ds = __USER_DS;
         ap.es = __USER_DS;
         ap.fs = __KERNEL_PERCPU;
-        ap.gs = 0;
+        ap.gs = __KERNEL_STACK_CANARY;
 
         ap.eflags = 0;
 
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index d7cd193c2277..a81170de7f6b 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -89,7 +89,9 @@ enum print_line_t {
         TRACE_TYPE_NO_CONSUME = 3        /* Handled but ask to not consume */
 };
 
-
+void tracing_generic_entry_update(struct trace_entry *entry,
+                                  unsigned long flags,
+                                  int pc);
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(int type, unsigned long len,
                                   unsigned long flags, int pc);
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index e604e6ef72dd..a67dd5c5b6d3 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -121,8 +121,9 @@ enum perf_counter_sample_format {
         PERF_SAMPLE_CPU                  = 1U << 7,
         PERF_SAMPLE_PERIOD               = 1U << 8,
         PERF_SAMPLE_STREAM_ID            = 1U << 9,
+        PERF_SAMPLE_TP_RECORD            = 1U << 10,
 
-        PERF_SAMPLE_MAX = 1U << 10,      /* non-ABI */
+        PERF_SAMPLE_MAX = 1U << 11,      /* non-ABI */
 };
 
 /*
@@ -413,6 +414,11 @@ struct perf_callchain_entry {
         __u64        ip[PERF_MAX_STACK_DEPTH];
 };
 
+struct perf_tracepoint_record {
+        int        size;
+        char        *record;
+};
+
 struct task_struct;
 
 /**
@@ -681,6 +687,7 @@ struct perf_sample_data {
         struct pt_regs        *regs;
         u64                   addr;
         u64                   period;
+        void                  *private;
 };
 
 extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
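
PERF_SAMPLE_TP_RECORD claims bit 10, so the non-ABI PERF_SAMPLE_MAX sentinel moves up to 1U << 11. Userspace opts in through attr.sample_type like any other sample flag; a hedged sketch against the perf_counter-era ABI (the helper name is hypothetical):

    #include <string.h>
    #include <linux/perf_counter.h>

    /* Hypothetical helper: ask for the raw tracepoint record in each sample. */
    static void setup_tracepoint_attr(struct perf_counter_attr *attr, __u64 tp_id)
    {
            memset(attr, 0, sizeof(*attr));
            attr->type        = PERF_TYPE_TRACEPOINT;
            attr->config      = tp_id;      /* tracepoint event id from debugfs */
            attr->sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
                                PERF_SAMPLE_TP_RECORD;
    }
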
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 1867553c61e5..7fb16d90e7b1 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -144,6 +144,9 @@
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
 
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print)   \
 static int                                                      \
@@ -345,6 +348,56 @@ static inline int ftrace_get_offsets_##call( \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+#ifdef CONFIG_EVENT_PROFILE
+
+/*
+ * Generate the functions needed for tracepoint perf_counter support.
+ *
+ * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
+ *
+ * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *         int ret = 0;
+ *
+ *         if (!atomic_inc_return(&event_call->profile_count))
+ *                 ret = register_trace_<call>(ftrace_profile_<call>);
+ *
+ *         return ret;
+ * }
+ *
+ * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *         if (atomic_add_negative(-1, &event->call->profile_count))
+ *                 unregister_trace_<call>(ftrace_profile_<call>);
+ * }
+ *
+ */
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)          \
+                                                                         \
+static void ftrace_profile_##call(proto);                                \
+                                                                         \
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{                                                                        \
+        int ret = 0;                                                     \
+                                                                         \
+        if (!atomic_inc_return(&event_call->profile_count))              \
+                ret = register_trace_##call(ftrace_profile_##call);      \
+                                                                         \
+        return ret;                                                      \
+}                                                                        \
+                                                                         \
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{                                                                        \
+        if (atomic_add_negative(-1, &event_call->profile_count))         \
+                unregister_trace_##call(ftrace_profile_##call);          \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#endif
+
 /*
  * Stage 4 of the trace events.
  *
@@ -447,28 +500,6 @@ static inline int ftrace_get_offsets_##call( \
 #define TP_FMT(fmt, args...)        fmt "\n", ##args
 
 #ifdef CONFIG_EVENT_PROFILE
-#define _TRACE_PROFILE(call, proto, args)                                \
-static void ftrace_profile_##call(proto)                                 \
-{                                                                        \
-        extern void perf_tpcounter_event(int);                           \
-        perf_tpcounter_event(event_##call.id);                           \
-}                                                                        \
-                                                                         \
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{                                                                        \
-        int ret = 0;                                                     \
-                                                                         \
-        if (!atomic_inc_return(&event_call->profile_count))              \
-                ret = register_trace_##call(ftrace_profile_##call);      \
-                                                                         \
-        return ret;                                                      \
-}                                                                        \
-                                                                         \
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{                                                                        \
-        if (atomic_add_negative(-1, &event_call->profile_count))         \
-                unregister_trace_##call(ftrace_profile_##call);          \
-}
 
 #define _TRACE_PROFILE_INIT(call)                                        \
         .profile_count = ATOMIC_INIT(-1),                                \
@@ -476,7 +507,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
         .profile_disable = ftrace_profile_disable_##call,
 
 #else
-#define _TRACE_PROFILE(call, proto, args)
 #define _TRACE_PROFILE_INIT(call)
 #endif
 
@@ -502,7 +532,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)          \
-_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))                        \
                                                                          \
 static struct ftrace_event_call event_##call;                            \
                                                                          \
@@ -586,6 +615,99 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#undef _TRACE_PROFILE
+/*
+ * Define the insertion callback to profile events
+ *
+ * The job is very similar to ftrace_raw_event_<call> except that we don't
+ * insert in the ring buffer but in a perf counter.
+ *
+ * static void ftrace_profile_<call>(proto)
+ * {
+ *         struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ *         struct ftrace_event_call *event_call = &event_<call>;
+ *         extern void perf_tpcounter_event(int, u64, u64, void *, int);
+ *         struct ftrace_raw_##call *entry;
+ *         u64 __addr = 0, __count = 1;
+ *         unsigned long irq_flags;
+ *         int __entry_size;
+ *         int __data_size;
+ *         int pc;
+ *
+ *         local_save_flags(irq_flags);
+ *         pc = preempt_count();
+ *
+ *         __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *         __entry_size = __data_size + sizeof(*entry);
+ *
+ *         do {
+ *                 char raw_data[__entry_size]; <- allocate our sample in the stack
+ *                 struct trace_entry *ent;
+ *
+ *                 entry = (struct ftrace_raw_<call> *)raw_data;
+ *                 ent = &entry->ent;
+ *                 tracing_generic_entry_update(ent, irq_flags, pc);
+ *                 ent->type = event_call->id;
+ *
+ *                 <tstruct> <- do some jobs with dynamic arrays
+ *
+ *                 <assign> <- affect our values
+ *
+ *                 perf_tpcounter_event(event_call->id, __addr, __count, entry,
+ *                              __entry_size); <- submit them to perf counter
+ *         } while (0);
+ *
+ * }
+ */
+
+#ifdef CONFIG_EVENT_PROFILE
+
+#undef __perf_addr
+#define __perf_addr(a) __addr = (a)
+
+#undef __perf_count
+#define __perf_count(c) __count = (c)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)          \
+static void ftrace_profile_##call(proto)                                 \
+{                                                                        \
+        struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+        struct ftrace_event_call *event_call = &event_##call;            \
+        extern void perf_tpcounter_event(int, u64, u64, void *, int);    \
+        struct ftrace_raw_##call *entry;                                 \
+        u64 __addr = 0, __count = 1;                                     \
+        unsigned long irq_flags;                                         \
+        int __entry_size;                                                \
+        int __data_size;                                                 \
+        int pc;                                                          \
+                                                                         \
+        local_save_flags(irq_flags);                                     \
+        pc = preempt_count();                                            \
+                                                                         \
+        __data_size = ftrace_get_offsets_##call(&__data_offsets, args);  \
+        __entry_size = ALIGN(__data_size + sizeof(*entry), sizeof(u64)); \
+                                                                         \
+        do {                                                             \
+                char raw_data[__entry_size];                             \
+                struct trace_entry *ent;                                 \
+                                                                         \
+                entry = (struct ftrace_raw_##call *)raw_data;            \
+                ent = &entry->ent;                                       \
+                tracing_generic_entry_update(ent, irq_flags, pc);        \
+                ent->type = event_call->id;                              \
+                                                                         \
+                tstruct                                                  \
+                                                                         \
+                { assign; }                                              \
+                                                                         \
+                perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+                             __entry_size);                              \
+        } while (0);                                                     \
+                                                                         \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_EVENT_PROFILE */
+
 #undef _TRACE_PROFILE_INIT
 
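
Two details of the generated ftrace_profile_<call> are easy to miss. The sample is built in a C99 variable-length array on the stack (char raw_data[__entry_size]), so the hot path allocates nothing; and __entry_size is rounded up with ALIGN(..., sizeof(u64)) because the perf output code copies payloads in u64 chunks. A standalone illustration of that rounding (MY_ALIGN is a local stand-in for the kernel macro):

    #include <stdio.h>
    #include <stdint.h>

    /* Open-coded equivalent of the kernel's ALIGN(x, a) for power-of-two a. */
    #define MY_ALIGN(x, a)  (((x) + ((a) - 1)) & ~((uintmax_t)(a) - 1))

    int main(void)
    {
            /* A 13-byte record is padded to 16 so u64-sized copies never
             * read past the end of the on-stack sample buffer. */
            printf("%ju\n", MY_ALIGN((uintmax_t)13, sizeof(uint64_t))); /* 16 */
            return 0;
    }
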
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index d7135aa2d2c4..e94caa666dba 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -758,7 +758,8 @@ static int __init lockdep_proc_init(void)
                         &proc_lockdep_stats_operations);
 
 #ifdef CONFIG_LOCK_STAT
-        proc_create("lock_stat", S_IRUSR, NULL, &proc_lock_stat_operations);
+        proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL,
+                        &proc_lock_stat_operations);
 #endif
 
         return 0;
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 673c1aaf7332..868102172aa4 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2646,6 +2646,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
                 u64                        counter;
         } group_entry;
         struct perf_callchain_entry *callchain = NULL;
+        struct perf_tracepoint_record *tp;
         int callchain_size = 0;
         u64 time;
         struct {
@@ -2714,6 +2715,11 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
                 header.size += sizeof(u64);
         }
 
+        if (sample_type & PERF_SAMPLE_TP_RECORD) {
+                tp = data->private;
+                header.size += tp->size;
+        }
+
         ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
         if (ret)
                 return;
@@ -2777,6 +2783,9 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
                 }
         }
 
+        if (sample_type & PERF_SAMPLE_TP_RECORD)
+                perf_output_copy(&handle, tp->record, tp->size);
+
         perf_output_end(&handle);
 }
 
@@ -3703,17 +3712,24 @@ static const struct pmu perf_ops_task_clock = {
 };
 
 #ifdef CONFIG_EVENT_PROFILE
-void perf_tpcounter_event(int event_id)
+void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
+                          int entry_size)
 {
+        struct perf_tracepoint_record tp = {
+                .size = entry_size,
+                .record = record,
+        };
+
         struct perf_sample_data data = {
                 .regs = get_irq_regs(),
-                .addr = 0,
+                .addr = addr,
+                .private = &tp,
         };
 
         if (!data.regs)
                 data.regs = task_pt_regs(current);
 
-        do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
+        do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
 }
 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
 
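
Note that the tracepoint record is never copied until the very end: perf_tpcounter_event() wraps the caller's on-stack buffer in a perf_tracepoint_record, hangs it off perf_sample_data.private, and perf_counter_output() reserves tp->size bytes up front and then streams them straight into the mmap buffer. A condensed view of the flow, as comments (not literal kernel code):

    /*
     * ftrace_profile_<call>()              include/trace/ftrace.h
     *     char raw_data[__entry_size];     <- record lives on this stack
     *     perf_tpcounter_event(id, __addr, __count, raw_data, __entry_size);
     *
     * perf_tpcounter_event()               kernel/perf_counter.c
     *     struct perf_tracepoint_record tp = { entry_size, record };
     *     data.private = &tp;              <- pointer only, still no copy
     *
     * perf_counter_output()
     *     header.size += tp->size;         <- reserve space first
     *     perf_output_copy(&handle, tp->record, tp->size);   <- the one copy
     */
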
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index fcd107a78c5a..29bd4baf9e75 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -1039,16 +1039,14 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
         if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) {
                 /* We got the lock for task. */
                 debug_rt_mutex_lock(lock);
-
                 rt_mutex_set_owner(lock, task, 0);
-
+                spin_unlock(&lock->wait_lock);
                 rt_mutex_deadlock_account_lock(lock, task);
                 return 1;
         }
 
         ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
 
-
         if (ret && !waiter->task) {
                 /*
                  * Reset the return value. We might have
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bf27bb7a63e2..a330513d96ce 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -735,6 +735,7 @@ ring_buffer_free(struct ring_buffer *buffer)
 
         put_online_cpus();
 
+        kfree(buffer->buffers);
         free_cpumask_var(buffer->cpumask);
 
         kfree(buffer);
@@ -1785,7 +1786,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
          */
         RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
 
-        if (!rb_try_to_discard(cpu_buffer, event))
+        if (rb_try_to_discard(cpu_buffer, event))
                 goto out;
 
         /*
@@ -2383,7 +2384,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
                  * the box. Return the padding, and we will release
                  * the current locks, and try again.
                  */
-                rb_advance_reader(cpu_buffer);
                 return event;
 
         case RINGBUF_TYPE_TIME_EXTEND:
@@ -2486,7 +2486,7 @@ static inline int rb_ok_to_lock(void)
          * buffer too. A one time deal is all you get from reading
          * the ring buffer from an NMI.
          */
-        if (likely(!in_nmi() && !oops_in_progress))
+        if (likely(!in_nmi()))
                 return 1;
 
         tracing_off_permanent();
@@ -2519,6 +2519,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
         if (dolock)
                 spin_lock(&cpu_buffer->reader_lock);
         event = rb_buffer_peek(buffer, cpu, ts);
+        if (event && event->type_len == RINGBUF_TYPE_PADDING)
+                rb_advance_reader(cpu_buffer);
         if (dolock)
                 spin_unlock(&cpu_buffer->reader_lock);
         local_irq_restore(flags);
@@ -2590,12 +2592,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
                 spin_lock(&cpu_buffer->reader_lock);
 
         event = rb_buffer_peek(buffer, cpu, ts);
-        if (!event)
-                goto out_unlock;
-
-        rb_advance_reader(cpu_buffer);
+        if (event)
+                rb_advance_reader(cpu_buffer);
 
- out_unlock:
         if (dolock)
                 spin_unlock(&cpu_buffer->reader_lock);
         local_irq_restore(flags);
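
Two of the ring-buffer fixes are easy to misread. rb_try_to_discard() returns nonzero on success, so the corrected test in ring_buffer_discard_commit() takes the early exit when the event really was pulled back, and only falls through to manual commit accounting when it was not; the old negated test had it backwards. Separately, rb_buffer_peek() no longer consumes padding itself; each caller now advances the reader exactly once. A condensed sketch (locking elided, illustrative):

    /* ring_buffer_discard_commit(): fixed sense of the test */
    if (rb_try_to_discard(cpu_buffer, event))
            goto out;       /* discarded in place, nothing left to account */
    /* fall through: discard failed, balance the commit counters by hand */

    /* ring_buffer_peek(): callers skip filler events themselves now */
    event = rb_buffer_peek(buffer, cpu, ts);
    if (event && event->type_len == RINGBUF_TYPE_PADDING)
            rb_advance_reader(cpu_buffer);
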
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8930e39b9d8c..c22b40f8f576 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -848,6 +848,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
                 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 }
+EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
 struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
                                                     int type,
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3548ae5cc780..8b9f4f6e9559 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -438,10 +438,6 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
                                           int *ent_cpu, u64 *ent_ts);
 
-void tracing_generic_entry_update(struct trace_entry *entry,
-                                  unsigned long flags,
-                                  int pc);
-
 void default_wait_pipe(struct trace_iterator *iter);
 void poll_wait_pipe(struct trace_iterator *iter);
 
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 936c621bbf46..f32dc9d1ea7b 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -624,9 +624,6 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps,
                 return -ENOSPC;
         }
 
-        filter->preds[filter->n_preds] = pred;
-        filter->n_preds++;
-
         list_for_each_entry(call, &ftrace_events, list) {
 
                 if (!call->define_fields)
@@ -643,6 +640,9 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps,
                 }
                 replace_filter_string(call->filter, filter_string);
         }
+
+        filter->preds[filter->n_preds] = pred;
+        filter->n_preds++;
 out:
         return err;
 }
@@ -1029,12 +1029,17 @@ static int replace_preds(struct event_subsystem *system,
 
                 if (elt->op == OP_AND || elt->op == OP_OR) {
                         pred = create_logical_pred(elt->op);
+                        if (!pred)
+                                return -ENOMEM;
                         if (call) {
                                 err = filter_add_pred(ps, call, pred);
                                 filter_free_pred(pred);
-                        } else
+                        } else {
                                 err = filter_add_subsystem_pred(ps, system,
                                                         pred, filter_string);
+                                if (err)
+                                        filter_free_pred(pred);
+                        }
                         if (err)
                                 return err;
 
@@ -1048,12 +1053,17 @@ static int replace_preds(struct event_subsystem *system,
                 }
 
                 pred = create_pred(elt->op, operand1, operand2);
+                if (!pred)
+                        return -ENOMEM;
                 if (call) {
                         err = filter_add_pred(ps, call, pred);
                         filter_free_pred(pred);
-                } else
+                } else {
                         err = filter_add_subsystem_pred(ps, system, pred,
                                                         filter_string);
+                        if (err)
+                                filter_free_pred(pred);
+                }
                 if (err)
                         return err;
 
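
The filter fixes all enforce one ownership rule: publish a predicate only after every fallible step has succeeded. filter_add_subsystem_pred() now installs the pred into filter->preds after the per-event loop, and both call sites in replace_preds() check the previously unchecked NULL return of create_pred()/create_logical_pred() and free the pred when the subsystem path fails. Skeleton of the pattern (apply_to_all_events is a hypothetical stand-in for the loop):

    pred = create_pred(elt->op, operand1, operand2);
    if (!pred)
            return -ENOMEM;                   /* allocation can fail */

    err = apply_to_all_events(pred);          /* hypothetical name for the loop */
    if (err) {
            filter_free_pred(pred);           /* nothing was published */
            return err;
    }
    filter->preds[filter->n_preds++] = pred;  /* commit last */
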
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index d29baa2e063a..911ba7ffab84 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -393,7 +393,7 @@ while (<IN>) {
             $read_function = 0;
         }
         # print out any recorded offsets
-        update_funcs() if ($text_found);
+        update_funcs() if (defined($ref_func));
 
         # reset all markers and arrays
         $text_found = 0;
@@ -414,7 +414,10 @@ while (<IN>) {
                 $offset = hex $1;
             } else {
                 # if we already have a function, and this is weak, skip it
-                if (!defined($ref_func) && !defined($weak{$text})) {
+                if (!defined($ref_func) && !defined($weak{$text}) &&
+                     # PPC64 can have symbols that start with .L and
+                     # gcc considers these special. Don't use them!
+                     $text !~ /^\.L/) {
                     $ref_func = $text;
                     $offset = hex $1;
                 }
@@ -441,7 +444,7 @@ while (<IN>) {
 }
 
 # dump out anymore offsets that may have been found
-update_funcs() if ($text_found);
+update_funcs() if (defined($ref_func));
 
 # If we did not find any mcount callers, we are done (do nothing).
 if (!$opened) {
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 6da09928130f..90c98082af10 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -412,6 +412,7 @@ static void create_counter(int counter, int cpu, pid_t pid)
         if (call_graph)
                 attr->sample_type        |= PERF_SAMPLE_CALLCHAIN;
 
+
         attr->mmap                = track;
         attr->comm                = track;
         attr->inherit             = (cpu < 0) && inherit;