path: root/kernel
author     Avi Kivity <avi@redhat.com>  2011-12-25 08:44:43 -0500
committer  Avi Kivity <avi@redhat.com>  2011-12-27 04:22:24 -0500
commit     9e31905f293ae84e4f120ed9e414031eaefa0bdf (patch)
tree       153204ff0dca820e760007bc24075ec7fb46a276 /kernel
parent     ff5c2c0316ff0e3e2dba3ca14167d994453df093 (diff)
parent     b3d9468a8bd218a695e3a0ff112cd4efd27b670a (diff)
Merge remote-tracking branch 'tip/perf/core' into kvm-updates/3.3
* tip/perf/core: (66 commits)
  perf, x86: Expose perf capability to other modules
  perf, x86: Implement arch event mask as quirk
  x86, perf: Disable non available architectural events
  jump_label: Provide jump_label_key initializers
  jump_label, x86: Fix section mismatch
  perf, core: Rate limit perf_sched_events jump_label patching
  perf: Fix enable_on_exec for sibling events
  perf: Remove superfluous arguments
  perf, x86: Prefer fixed-purpose counters when scheduling
  perf, x86: Fix event scheduler for constraints with overlapping counters
  perf, x86: Implement event scheduler helper functions
  perf: Avoid a useless pmu_disable() in the perf-tick
  x86/tools: Add decoded instruction dump mode
  x86: Update instruction decoder to support new AVX formats
  x86/tools: Fix insn_sanity message outputs
  x86/tools: Fix instruction decoder message output
  x86: Fix instruction decoder to handle grouped AVX instructions
  x86/tools: Fix Makefile to build all test tools
  perf test: Soft errors shouldn't stop the "Validate PERF_RECORD_" test
  perf test: Validate PERF_RECORD_ events and perf_sample fields
  ...

Signed-off-by: Avi Kivity <avi@redhat.com>
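One of the changes pulled in here is the deferred jump label API ("perf, core: Rate limit perf_sched_events jump_label patching"): the perf core below converts perf_sched_events to a jump_label_key_deferred and calls jump_label_rate_limit(&perf_sched_events, HZ) so the key is never patched more than once per second on the disable path. A minimal usage sketch of that API, assuming the 3.3-era names shown in this diff (my_feature_key and the init/enable/disable helpers are hypothetical; the API was later renamed to the static_key_* family):

	#include <linux/jump_label.h>
	#include <linux/workqueue.h>

	/* Hypothetical deferred key: disables are batched via delayed work. */
	static struct jump_label_key_deferred my_feature_key;

	static int __init my_feature_init(void)
	{
		/* Patch the jump label at most once per second on the dec path. */
		jump_label_rate_limit(&my_feature_key, HZ);
		return 0;
	}

	static void my_feature_enable(void)
	{
		jump_label_inc(&my_feature_key.key);		/* enable immediately */
	}

	static void my_feature_disable(void)
	{
		jump_label_dec_deferred(&my_feature_key);	/* disable, rate limited */
	}

This mirrors how perf_event_init() and free_event() use the API in the core.c hunks below.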
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/Makefile              |   2
-rw-r--r--  kernel/events/callchain.c           | 191
-rw-r--r--  kernel/events/core.c                | 296
-rw-r--r--  kernel/events/internal.h            |  39
-rw-r--r--  kernel/jump_label.c                 |  49
-rw-r--r--  kernel/lockdep.c                    |  30
-rw-r--r--  kernel/trace/trace.c                | 105
-rw-r--r--  kernel/trace/trace.h                |   2
-rw-r--r--  kernel/trace/trace_events_filter.c  |  26
-rw-r--r--  kernel/trace/trace_irqsoff.c        |  13
-rw-r--r--  kernel/trace/trace_output.c         |  16
-rw-r--r--  kernel/trace/trace_sched_wakeup.c   |  13
12 files changed, 476 insertions, 306 deletions
diff --git a/kernel/events/Makefile b/kernel/events/Makefile
index 89e5e8aa4c36..22d901f9caf4 100644
--- a/kernel/events/Makefile
+++ b/kernel/events/Makefile
@@ -2,5 +2,5 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_core.o = -pg
 endif
 
-obj-y := core.o ring_buffer.o
+obj-y := core.o ring_buffer.o callchain.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
new file mode 100644
index 000000000000..057e24b665cf
--- /dev/null
+++ b/kernel/events/callchain.c
@@ -0,0 +1,191 @@
+/*
+ * Performance events callchain code, extracted from core.c:
+ *
+ * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * For licensing details see kernel-base/COPYING
+ */
+
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+struct callchain_cpus_entries {
+	struct rcu_head			rcu_head;
+	struct perf_callchain_entry	*cpu_entries[0];
+};
+
+static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
+static atomic_t nr_callchain_events;
+static DEFINE_MUTEX(callchain_mutex);
+static struct callchain_cpus_entries *callchain_cpus_entries;
+
+
+__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+				  struct pt_regs *regs)
+{
+}
+
+__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+				struct pt_regs *regs)
+{
+}
+
+static void release_callchain_buffers_rcu(struct rcu_head *head)
+{
+	struct callchain_cpus_entries *entries;
+	int cpu;
+
+	entries = container_of(head, struct callchain_cpus_entries, rcu_head);
+
+	for_each_possible_cpu(cpu)
+		kfree(entries->cpu_entries[cpu]);
+
+	kfree(entries);
+}
+
+static void release_callchain_buffers(void)
+{
+	struct callchain_cpus_entries *entries;
+
+	entries = callchain_cpus_entries;
+	rcu_assign_pointer(callchain_cpus_entries, NULL);
+	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
+}
+
+static int alloc_callchain_buffers(void)
+{
+	int cpu;
+	int size;
+	struct callchain_cpus_entries *entries;
+
+	/*
+	 * We can't use the percpu allocation API for data that can be
+	 * accessed from NMI. Use a temporary manual per cpu allocation
+	 * until that gets sorted out.
+	 */
+	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
+
+	entries = kzalloc(size, GFP_KERNEL);
+	if (!entries)
+		return -ENOMEM;
+
+	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
+
+	for_each_possible_cpu(cpu) {
+		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
+							 cpu_to_node(cpu));
+		if (!entries->cpu_entries[cpu])
+			goto fail;
+	}
+
+	rcu_assign_pointer(callchain_cpus_entries, entries);
+
+	return 0;
+
+fail:
+	for_each_possible_cpu(cpu)
+		kfree(entries->cpu_entries[cpu]);
+	kfree(entries);
+
+	return -ENOMEM;
+}
+
+int get_callchain_buffers(void)
+{
+	int err = 0;
+	int count;
+
+	mutex_lock(&callchain_mutex);
+
+	count = atomic_inc_return(&nr_callchain_events);
+	if (WARN_ON_ONCE(count < 1)) {
+		err = -EINVAL;
+		goto exit;
+	}
+
+	if (count > 1) {
+		/* If the allocation failed, give up */
+		if (!callchain_cpus_entries)
+			err = -ENOMEM;
+		goto exit;
+	}
+
+	err = alloc_callchain_buffers();
+	if (err)
+		release_callchain_buffers();
+exit:
+	mutex_unlock(&callchain_mutex);
+
+	return err;
+}
+
+void put_callchain_buffers(void)
+{
+	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
+		release_callchain_buffers();
+		mutex_unlock(&callchain_mutex);
+	}
+}
+
+static struct perf_callchain_entry *get_callchain_entry(int *rctx)
+{
+	int cpu;
+	struct callchain_cpus_entries *entries;
+
+	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+	if (*rctx == -1)
+		return NULL;
+
+	entries = rcu_dereference(callchain_cpus_entries);
+	if (!entries)
+		return NULL;
+
+	cpu = smp_processor_id();
+
+	return &entries->cpu_entries[cpu][*rctx];
+}
+
+static void
+put_callchain_entry(int rctx)
+{
+	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+}
+
+struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+	int rctx;
+	struct perf_callchain_entry *entry;
+
+
+	entry = get_callchain_entry(&rctx);
+	if (rctx == -1)
+		return NULL;
+
+	if (!entry)
+		goto exit_put;
+
+	entry->nr = 0;
+
+	if (!user_mode(regs)) {
+		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+		perf_callchain_kernel(entry, regs);
+		if (current->mm)
+			regs = task_pt_regs(current);
+		else
+			regs = NULL;
+	}
+
+	if (regs) {
+		perf_callchain_store(entry, PERF_CONTEXT_USER);
+		perf_callchain_user(entry, regs);
+	}
+
+exit_put:
+	put_callchain_entry(rctx);
+
+	return entry;
+}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 58690af323e4..24e3a4b1c89c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -128,7 +128,7 @@ enum event_type_t {
  * perf_sched_events : >0 events exist
  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
  */
-struct jump_label_key perf_sched_events __read_mostly;
+struct jump_label_key_deferred perf_sched_events __read_mostly;
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 
 static atomic_t nr_mmap_events __read_mostly;
@@ -1130,6 +1130,8 @@ event_sched_out(struct perf_event *event,
 	if (!is_software_event(event))
 		cpuctx->active_oncpu--;
 	ctx->nr_active--;
+	if (event->attr.freq && event->attr.sample_freq)
+		ctx->nr_freq--;
 	if (event->attr.exclusive || !cpuctx->active_oncpu)
 		cpuctx->exclusive = 0;
 }
@@ -1325,6 +1327,7 @@ retry:
 	}
 	raw_spin_unlock_irq(&ctx->lock);
 }
+EXPORT_SYMBOL_GPL(perf_event_disable);
 
 static void perf_set_shadow_time(struct perf_event *event,
 				 struct perf_event_context *ctx,
@@ -1406,6 +1409,8 @@ event_sched_in(struct perf_event *event,
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
 	ctx->nr_active++;
+	if (event->attr.freq && event->attr.sample_freq)
+		ctx->nr_freq++;
 
 	if (event->attr.exclusive)
 		cpuctx->exclusive = 1;
@@ -1662,8 +1667,7 @@ retry:
  * Note: this works for group members as well as group leaders
  * since the non-leader members' sibling_lists will be empty.
  */
-static void __perf_event_mark_enabled(struct perf_event *event,
-				      struct perf_event_context *ctx)
+static void __perf_event_mark_enabled(struct perf_event *event)
 {
 	struct perf_event *sub;
 	u64 tstamp = perf_event_time(event);
@@ -1701,7 +1705,7 @@ static int __perf_event_enable(void *info)
 	 */
 	perf_cgroup_set_timestamp(current, ctx);
 
-	__perf_event_mark_enabled(event, ctx);
+	__perf_event_mark_enabled(event);
 
 	if (!event_filter_match(event)) {
 		if (is_cgroup_event(event))
@@ -1782,7 +1786,7 @@ void perf_event_enable(struct perf_event *event)
 
 retry:
 	if (!ctx->is_active) {
-		__perf_event_mark_enabled(event, ctx);
+		__perf_event_mark_enabled(event);
 		goto out;
 	}
 
@@ -1809,6 +1813,7 @@ retry:
 out:
 	raw_spin_unlock_irq(&ctx->lock);
 }
+EXPORT_SYMBOL_GPL(perf_event_enable);
 
 int perf_event_refresh(struct perf_event *event, int refresh)
 {
@@ -2327,6 +2332,9 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
 	u64 interrupts, now;
 	s64 delta;
 
+	if (!ctx->nr_freq)
+		return;
+
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
@@ -2382,12 +2390,14 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 {
 	u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
 	struct perf_event_context *ctx = NULL;
-	int rotate = 0, remove = 1;
+	int rotate = 0, remove = 1, freq = 0;
 
 	if (cpuctx->ctx.nr_events) {
 		remove = 0;
 		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
 			rotate = 1;
+		if (cpuctx->ctx.nr_freq)
+			freq = 1;
 	}
 
 	ctx = cpuctx->task_ctx;
@@ -2395,33 +2405,40 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 		remove = 0;
 		if (ctx->nr_events != ctx->nr_active)
 			rotate = 1;
+		if (ctx->nr_freq)
+			freq = 1;
 	}
 
+	if (!rotate && !freq)
+		goto done;
+
 	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 	perf_pmu_disable(cpuctx->ctx.pmu);
-	perf_ctx_adjust_freq(&cpuctx->ctx, interval);
-	if (ctx)
-		perf_ctx_adjust_freq(ctx, interval);
 
-	if (!rotate)
-		goto done;
+	if (freq) {
+		perf_ctx_adjust_freq(&cpuctx->ctx, interval);
+		if (ctx)
+			perf_ctx_adjust_freq(ctx, interval);
+	}
 
-	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-	if (ctx)
-		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
+	if (rotate) {
+		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+		if (ctx)
+			ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
 
-	rotate_ctx(&cpuctx->ctx);
-	if (ctx)
-		rotate_ctx(ctx);
+		rotate_ctx(&cpuctx->ctx);
+		if (ctx)
+			rotate_ctx(ctx);
 
-	perf_event_sched_in(cpuctx, ctx, current);
+		perf_event_sched_in(cpuctx, ctx, current);
+	}
+
+	perf_pmu_enable(cpuctx->ctx.pmu);
+	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 
 done:
 	if (remove)
 		list_del_init(&cpuctx->rotation_list);
-
-	perf_pmu_enable(cpuctx->ctx.pmu);
-	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 }
 
 void perf_event_task_tick(void)
@@ -2448,7 +2465,7 @@ static int event_enable_on_exec(struct perf_event *event,
 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	__perf_event_mark_enabled(event, ctx);
+	__perf_event_mark_enabled(event);
 
 	return 1;
 }
@@ -2480,13 +2497,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	raw_spin_lock(&ctx->lock);
 	task_ctx_sched_out(ctx);
 
-	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
-		ret = event_enable_on_exec(event, ctx);
-		if (ret)
-			enabled = 1;
-	}
-
-	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
+	list_for_each_entry(event, &ctx->event_list, event_entry) {
 		ret = event_enable_on_exec(event, ctx);
 		if (ret)
 			enabled = 1;
@@ -2574,215 +2585,6 @@ static u64 perf_event_read(struct perf_event *event)
 }
 
 /*
- * Callchain support
- */
-
-struct callchain_cpus_entries {
-	struct rcu_head			rcu_head;
-	struct perf_callchain_entry	*cpu_entries[0];
-};
-
-static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
-static atomic_t nr_callchain_events;
-static DEFINE_MUTEX(callchain_mutex);
-struct callchain_cpus_entries *callchain_cpus_entries;
-
-
-__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
-				  struct pt_regs *regs)
-{
-}
-
-__weak void perf_callchain_user(struct perf_callchain_entry *entry,
-				struct pt_regs *regs)
-{
-}
-
-static void release_callchain_buffers_rcu(struct rcu_head *head)
-{
-	struct callchain_cpus_entries *entries;
-	int cpu;
-
-	entries = container_of(head, struct callchain_cpus_entries, rcu_head);
-
-	for_each_possible_cpu(cpu)
-		kfree(entries->cpu_entries[cpu]);
-
-	kfree(entries);
-}
-
-static void release_callchain_buffers(void)
-{
-	struct callchain_cpus_entries *entries;
-
-	entries = callchain_cpus_entries;
-	rcu_assign_pointer(callchain_cpus_entries, NULL);
-	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
-}
-
-static int alloc_callchain_buffers(void)
-{
-	int cpu;
-	int size;
-	struct callchain_cpus_entries *entries;
-
-	/*
-	 * We can't use the percpu allocation API for data that can be
-	 * accessed from NMI. Use a temporary manual per cpu allocation
-	 * until that gets sorted out.
-	 */
-	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
-
-	entries = kzalloc(size, GFP_KERNEL);
-	if (!entries)
-		return -ENOMEM;
-
-	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
-
-	for_each_possible_cpu(cpu) {
-		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
-							 cpu_to_node(cpu));
-		if (!entries->cpu_entries[cpu])
-			goto fail;
-	}
-
-	rcu_assign_pointer(callchain_cpus_entries, entries);
-
-	return 0;
-
-fail:
-	for_each_possible_cpu(cpu)
-		kfree(entries->cpu_entries[cpu]);
-	kfree(entries);
-
-	return -ENOMEM;
-}
-
-static int get_callchain_buffers(void)
-{
-	int err = 0;
-	int count;
-
-	mutex_lock(&callchain_mutex);
-
-	count = atomic_inc_return(&nr_callchain_events);
-	if (WARN_ON_ONCE(count < 1)) {
-		err = -EINVAL;
-		goto exit;
-	}
-
-	if (count > 1) {
-		/* If the allocation failed, give up */
-		if (!callchain_cpus_entries)
-			err = -ENOMEM;
-		goto exit;
-	}
-
-	err = alloc_callchain_buffers();
-	if (err)
-		release_callchain_buffers();
-exit:
-	mutex_unlock(&callchain_mutex);
-
-	return err;
-}
-
-static void put_callchain_buffers(void)
-{
-	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
-		release_callchain_buffers();
-		mutex_unlock(&callchain_mutex);
-	}
-}
-
-static int get_recursion_context(int *recursion)
-{
-	int rctx;
-
-	if (in_nmi())
-		rctx = 3;
-	else if (in_irq())
-		rctx = 2;
-	else if (in_softirq())
-		rctx = 1;
-	else
-		rctx = 0;
-
-	if (recursion[rctx])
-		return -1;
-
-	recursion[rctx]++;
-	barrier();
-
-	return rctx;
-}
-
-static inline void put_recursion_context(int *recursion, int rctx)
-{
-	barrier();
-	recursion[rctx]--;
-}
-
-static struct perf_callchain_entry *get_callchain_entry(int *rctx)
-{
-	int cpu;
-	struct callchain_cpus_entries *entries;
-
-	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
-	if (*rctx == -1)
-		return NULL;
-
-	entries = rcu_dereference(callchain_cpus_entries);
-	if (!entries)
-		return NULL;
-
-	cpu = smp_processor_id();
-
-	return &entries->cpu_entries[cpu][*rctx];
-}
-
-static void
-put_callchain_entry(int rctx)
-{
-	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
-}
-
-static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-	int rctx;
-	struct perf_callchain_entry *entry;
-
-
-	entry = get_callchain_entry(&rctx);
-	if (rctx == -1)
-		return NULL;
-
-	if (!entry)
-		goto exit_put;
-
-	entry->nr = 0;
-
-	if (!user_mode(regs)) {
-		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
-		perf_callchain_kernel(entry, regs);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-
-	if (regs) {
-		perf_callchain_store(entry, PERF_CONTEXT_USER);
-		perf_callchain_user(entry, regs);
-	}
-
-exit_put:
-	put_callchain_entry(rctx);
-
-	return entry;
-}
-
-/*
  * Initialize the perf_event context in a task_struct:
  */
 static void __perf_event_init_context(struct perf_event_context *ctx)
@@ -2946,7 +2748,7 @@ static void free_event(struct perf_event *event)
 
 	if (!event->parent) {
 		if (event->attach_state & PERF_ATTACH_TASK)
-			jump_label_dec(&perf_sched_events);
+			jump_label_dec_deferred(&perf_sched_events);
 		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_dec(&nr_mmap_events);
 		if (event->attr.comm)
@@ -2957,7 +2759,7 @@ static void free_event(struct perf_event *event)
 			put_callchain_buffers();
 		if (is_cgroup_event(event)) {
 			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
-			jump_label_dec(&perf_sched_events);
+			jump_label_dec_deferred(&perf_sched_events);
 		}
 	}
 
@@ -4820,7 +4622,6 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
 	struct hw_perf_event *hwc = &event->hw;
 	int throttle = 0;
 
-	data->period = event->hw.last_period;
 	if (!overflow)
 		overflow = perf_swevent_set_period(event);
 
@@ -4854,6 +4655,12 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
 	if (!is_sampling_event(event))
 		return;
 
+	if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
+		data->period = nr;
+		return perf_swevent_overflow(event, 1, data, regs);
+	} else
+		data->period = event->hw.last_period;
+
 	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
 		return perf_swevent_overflow(event, 1, data, regs);
 
@@ -5981,7 +5788,7 @@ done:
 
 	if (!event->parent) {
 		if (event->attach_state & PERF_ATTACH_TASK)
-			jump_label_inc(&perf_sched_events);
+			jump_label_inc(&perf_sched_events.key);
 		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_inc(&nr_mmap_events);
 		if (event->attr.comm)
@@ -6219,7 +6026,7 @@ SYSCALL_DEFINE5(perf_event_open,
 		 * - that may need work on context switch
 		 */
 		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
-		jump_label_inc(&perf_sched_events);
+		jump_label_inc(&perf_sched_events.key);
 	}
 
 	/*
@@ -7065,6 +6872,9 @@ void __init perf_event_init(void)
 
 	ret = init_hw_breakpoint();
 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
+
+	/* do not patch jump label more than once per second */
+	jump_label_rate_limit(&perf_sched_events, HZ);
 }
 
 static int __init perf_event_sysfs_init(void)
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 64568a699375..b0b107f90afc 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -1,6 +1,10 @@
 #ifndef _KERNEL_EVENTS_INTERNAL_H
 #define _KERNEL_EVENTS_INTERNAL_H
 
+#include <linux/hardirq.h>
+
+/* Buffer handling */
+
 #define RING_BUFFER_WRITABLE		0x01
 
 struct ring_buffer {
@@ -67,7 +71,7 @@ static inline int page_order(struct ring_buffer *rb)
 }
 #endif
 
-static unsigned long perf_data_size(struct ring_buffer *rb)
+static inline unsigned long perf_data_size(struct ring_buffer *rb)
 {
 	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
 }
@@ -96,4 +100,37 @@ __output_copy(struct perf_output_handle *handle,
 	} while (len);
 }
 
+/* Callchain handling */
+extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+extern int get_callchain_buffers(void);
+extern void put_callchain_buffers(void);
+
+static inline int get_recursion_context(int *recursion)
+{
+	int rctx;
+
+	if (in_nmi())
+		rctx = 3;
+	else if (in_irq())
+		rctx = 2;
+	else if (in_softirq())
+		rctx = 1;
+	else
+		rctx = 0;
+
+	if (recursion[rctx])
+		return -1;
+
+	recursion[rctx]++;
+	barrier();
+
+	return rctx;
+}
+
+static inline void put_recursion_context(int *recursion, int rctx)
+{
+	barrier();
+	recursion[rctx]--;
+}
+
 #endif /* _KERNEL_EVENTS_INTERNAL_H */
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 2af9027106a8..01d3b70fc98a 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -73,16 +73,47 @@ void jump_label_inc(struct jump_label_key *key)
 }
 EXPORT_SYMBOL_GPL(jump_label_inc);
 
-void jump_label_dec(struct jump_label_key *key)
+static void __jump_label_dec(struct jump_label_key *key,
+		unsigned long rate_limit, struct delayed_work *work)
 {
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))
 		return;
 
-	jump_label_update(key, JUMP_LABEL_DISABLE);
+	if (rate_limit) {
+		atomic_inc(&key->enabled);
+		schedule_delayed_work(work, rate_limit);
+	} else
+		jump_label_update(key, JUMP_LABEL_DISABLE);
+
 	jump_label_unlock();
 }
-EXPORT_SYMBOL_GPL(jump_label_dec);
+
 
+static void jump_label_update_timeout(struct work_struct *work)
+{
+	struct jump_label_key_deferred *key =
+		container_of(work, struct jump_label_key_deferred, work.work);
+	__jump_label_dec(&key->key, 0, NULL);
+}
+
+void jump_label_dec(struct jump_label_key *key)
+{
+	__jump_label_dec(key, 0, NULL);
+}
+
+void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+{
+	__jump_label_dec(&key->key, key->timeout, &key->work);
+}
+
+
+void jump_label_rate_limit(struct jump_label_key_deferred *key,
+		unsigned long rl)
+{
+	key->timeout = rl;
+	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
+}
+
 static int addr_conflict(struct jump_entry *entry, void *start, void *end)
 {
 	if (entry->code <= (unsigned long)end &&
@@ -113,7 +144,7 @@ static int __jump_label_text_reserved(struct jump_entry *iter_start,
  * running code can override this to make the non-live update case
  * cheaper.
  */
-void __weak arch_jump_label_transform_static(struct jump_entry *entry,
+void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
 					     enum jump_label_type type)
 {
 	arch_jump_label_transform(entry, type);
@@ -219,8 +250,13 @@ void jump_label_apply_nops(struct module *mod)
 	if (iter_start == iter_stop)
 		return;
 
-	for (iter = iter_start; iter < iter_stop; iter++)
-		arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
+	for (iter = iter_start; iter < iter_stop; iter++) {
+		struct jump_label_key *iterk;
+
+		iterk = (struct jump_label_key *)(unsigned long)iter->key;
+		arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
+				JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
+	}
 }
 
 static int jump_label_add_module(struct module *mod)
@@ -260,8 +296,7 @@ static int jump_label_add_module(struct module *mod)
 		key->next = jlm;
 
 		if (jump_label_enabled(key))
-			__jump_label_update(key, iter, iter_stop,
-					    JUMP_LABEL_ENABLE);
+			__jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
 	}
 
 	return 0;
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b2e08c932d91..24f176c9fc9f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -499,36 +499,32 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
 	usage[i] = '\0';
 }
 
-static int __print_lock_name(struct lock_class *class)
+static void __print_lock_name(struct lock_class *class)
 {
 	char str[KSYM_NAME_LEN];
 	const char *name;
 
 	name = class->name;
-	if (!name)
-		name = __get_key_name(class->key, str);
-
-	return printk("%s", name);
-}
-
-static void print_lock_name(struct lock_class *class)
-{
-	char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
-	const char *name;
-
-	get_usage_chars(class, usage);
-
-	name = class->name;
 	if (!name) {
 		name = __get_key_name(class->key, str);
-		printk(" (%s", name);
+		printk("%s", name);
 	} else {
-		printk(" (%s", name);
+		printk("%s", name);
 		if (class->name_version > 1)
 			printk("#%d", class->name_version);
 		if (class->subclass)
 			printk("/%d", class->subclass);
 	}
+}
+
+static void print_lock_name(struct lock_class *class)
+{
+	char usage[LOCK_USAGE_CHARS];
+
+	get_usage_chars(class, usage);
+
+	printk(" (");
+	__print_lock_name(class);
 	printk("){%s}", usage);
 }
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f2bd275bb60f..7392070ffc39 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -338,7 +338,8 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
-	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
+	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
+	TRACE_ITER_IRQ_INFO;
 
 static int trace_stop_count;
 static DEFINE_RAW_SPINLOCK(tracing_start_lock);
@@ -426,6 +427,7 @@ static const char *trace_options[] = {
426 "record-cmd", 427 "record-cmd",
427 "overwrite", 428 "overwrite",
428 "disable_on_free", 429 "disable_on_free",
430 "irq-info",
429 NULL 431 NULL
430}; 432};
431 433
@@ -1843,6 +1845,33 @@ static void s_stop(struct seq_file *m, void *p)
 	trace_event_read_unlock();
 }
 
+static void
+get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
+{
+	unsigned long count;
+	int cpu;
+
+	*total = 0;
+	*entries = 0;
+
+	for_each_tracing_cpu(cpu) {
+		count = ring_buffer_entries_cpu(tr->buffer, cpu);
+		/*
+		 * If this buffer has skipped entries, then we hold all
+		 * entries for the trace and we need to ignore the
+		 * ones before the time stamp.
+		 */
+		if (tr->data[cpu]->skipped_entries) {
+			count -= tr->data[cpu]->skipped_entries;
+			/* total is the same as the entries */
+			*total += count;
+		} else
+			*total += count +
+				ring_buffer_overrun_cpu(tr->buffer, cpu);
+		*entries += count;
+	}
+}
+
 static void print_lat_help_header(struct seq_file *m)
 {
 	seq_puts(m, "#                  _------=> CPU#            \n");
@@ -1855,12 +1884,35 @@ static void print_lat_help_header(struct seq_file *m)
 	seq_puts(m, "#     \\   /      |||||  \\    |   /     \n");
 }
 
-static void print_func_help_header(struct seq_file *m)
+static void print_event_info(struct trace_array *tr, struct seq_file *m)
+{
+	unsigned long total;
+	unsigned long entries;
+
+	get_total_entries(tr, &total, &entries);
+	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
+		   entries, total, num_online_cpus());
+	seq_puts(m, "#\n");
+}
+
+static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
 {
-	seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
+	print_event_info(tr, m);
+	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
 	seq_puts(m, "#              | |       |          |         |\n");
 }
 
+static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
+{
+	print_event_info(tr, m);
+	seq_puts(m, "#                              _-----=> irqs-off\n");
+	seq_puts(m, "#                             / _----=> need-resched\n");
+	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
+	seq_puts(m, "#                            || / _--=> preempt-depth\n");
+	seq_puts(m, "#                            ||| /     delay\n");
+	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
+	seq_puts(m, "#              | |       |   ||||       |         |\n");
+}
 
 void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
@@ -1869,32 +1921,14 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 	struct trace_array *tr = iter->tr;
 	struct trace_array_cpu *data = tr->data[tr->cpu];
 	struct tracer *type = current_trace;
-	unsigned long entries = 0;
-	unsigned long total = 0;
-	unsigned long count;
+	unsigned long entries;
+	unsigned long total;
 	const char *name = "preemption";
-	int cpu;
 
 	if (type)
 		name = type->name;
 
-
-	for_each_tracing_cpu(cpu) {
-		count = ring_buffer_entries_cpu(tr->buffer, cpu);
-		/*
-		 * If this buffer has skipped entries, then we hold all
-		 * entries for the trace and we need to ignore the
-		 * ones before the time stamp.
-		 */
-		if (tr->data[cpu]->skipped_entries) {
-			count -= tr->data[cpu]->skipped_entries;
-			/* total is the same as the entries */
-			total += count;
-		} else
-			total += count +
-				ring_buffer_overrun_cpu(tr->buffer, cpu);
-		entries += count;
-	}
+	get_total_entries(tr, &total, &entries);
 
 	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
 		   name, UTS_RELEASE);
@@ -2140,6 +2174,21 @@ enum print_line_t print_trace_line(struct trace_iterator *iter)
 		return print_trace_fmt(iter);
 }
 
+void trace_latency_header(struct seq_file *m)
+{
+	struct trace_iterator *iter = m->private;
+
+	/* print nothing if the buffers are empty */
+	if (trace_empty(iter))
+		return;
+
+	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
+		print_trace_header(m, iter);
+
+	if (!(trace_flags & TRACE_ITER_VERBOSE))
+		print_lat_help_header(m);
+}
+
 void trace_default_header(struct seq_file *m)
 {
 	struct trace_iterator *iter = m->private;
@@ -2155,8 +2204,12 @@ void trace_default_header(struct seq_file *m)
 		if (!(trace_flags & TRACE_ITER_VERBOSE))
 			print_lat_help_header(m);
 	} else {
-		if (!(trace_flags & TRACE_ITER_VERBOSE))
-			print_func_help_header(m);
+		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
+			if (trace_flags & TRACE_ITER_IRQ_INFO)
+				print_func_help_header_irq(iter->tr, m);
+			else
+				print_func_help_header(iter->tr, m);
+		}
 	}
 }
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 092e1f8d18dc..2c2657462ac3 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -370,6 +370,7 @@ void trace_graph_function(struct trace_array *tr,
 			  unsigned long ip,
 			  unsigned long parent_ip,
 			  unsigned long flags, int pc);
+void trace_latency_header(struct seq_file *m);
 void trace_default_header(struct seq_file *m);
 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
 int trace_empty(struct trace_iterator *iter);
@@ -654,6 +655,7 @@ enum trace_iterator_flags {
 	TRACE_ITER_RECORD_CMD		= 0x100000,
 	TRACE_ITER_OVERWRITE		= 0x200000,
 	TRACE_ITER_STOP_ON_FREE		= 0x400000,
+	TRACE_ITER_IRQ_INFO		= 0x800000,
 };
 
 /*
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 95dc31efd6dd..f04cc3136bd3 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -27,6 +27,12 @@
 #include "trace.h"
 #include "trace_output.h"
 
+#define DEFAULT_SYS_FILTER_MESSAGE					\
+	"### global filter ###\n"					\
+	"# Use this to set filters for multiple events.\n"		\
+	"# Only events with the given fields will be affected.\n"	\
+	"# If no events are modified, an error message will be displayed here"
+
 enum filter_op_ids
 {
 	OP_OR,
@@ -646,7 +652,7 @@ void print_subsystem_event_filter(struct event_subsystem *system,
 	if (filter && filter->filter_string)
 		trace_seq_printf(s, "%s\n", filter->filter_string);
 	else
-		trace_seq_printf(s, "none\n");
+		trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
 	mutex_unlock(&event_mutex);
 }
 
@@ -1838,7 +1844,10 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
 	if (!filter)
 		goto out;
 
-	replace_filter_string(filter, filter_string);
+	/* System filters just show a default message */
+	kfree(filter->filter_string);
+	filter->filter_string = NULL;
+
 	/*
 	 * No event actually uses the system filter
 	 * we can free it without synchronize_sched().
@@ -1848,14 +1857,12 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
 
 	parse_init(ps, filter_ops, filter_string);
 	err = filter_parse(ps);
-	if (err) {
-		append_filter_err(ps, system->filter);
-		goto out;
-	}
+	if (err)
+		goto err_filter;
 
 	err = replace_system_preds(system, ps, filter_string);
 	if (err)
-		append_filter_err(ps, system->filter);
+		goto err_filter;
 
 out:
 	filter_opstack_clear(ps);
@@ -1865,6 +1872,11 @@ out_unlock:
 	mutex_unlock(&event_mutex);
 
 	return err;
+
+err_filter:
+	replace_filter_string(filter, filter_string);
+	append_filter_err(ps, system->filter);
+	goto out;
 }
 
 #ifdef CONFIG_PERF_EVENTS
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 20dad0d7a163..99d20e920368 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -280,9 +280,20 @@ static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
 }
 
 static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
-static void irqsoff_print_header(struct seq_file *s) { }
 static void irqsoff_trace_open(struct trace_iterator *iter) { }
 static void irqsoff_trace_close(struct trace_iterator *iter) { }
+
+#ifdef CONFIG_FUNCTION_TRACER
+static void irqsoff_print_header(struct seq_file *s)
+{
+	trace_default_header(s);
+}
+#else
+static void irqsoff_print_header(struct seq_file *s)
+{
+	trace_latency_header(s);
+}
+#endif /* CONFIG_FUNCTION_TRACER */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 /*
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 51999309a6cf..0d6ff3555942 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -627,11 +627,23 @@ int trace_print_context(struct trace_iterator *iter)
 	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
 	unsigned long secs = (unsigned long)t;
 	char comm[TASK_COMM_LEN];
+	int ret;
 
 	trace_find_cmdline(entry->pid, comm);
 
-	return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
-				comm, entry->pid, iter->cpu, secs, usec_rem);
+	ret = trace_seq_printf(s, "%16s-%-5d [%03d] ",
+			       comm, entry->pid, iter->cpu);
+	if (!ret)
+		return 0;
+
+	if (trace_flags & TRACE_ITER_IRQ_INFO) {
+		ret = trace_print_lat_fmt(s, entry);
+		if (!ret)
+			return 0;
+	}
+
+	return trace_seq_printf(s, " %5lu.%06lu: ",
+				secs, usec_rem);
 }
 
 int trace_print_lat_context(struct trace_iterator *iter)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index e4a70c0c71b6..ff791ea48b57 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -280,9 +280,20 @@ static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
 }
 
 static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
-static void wakeup_print_header(struct seq_file *s) { }
 static void wakeup_trace_open(struct trace_iterator *iter) { }
 static void wakeup_trace_close(struct trace_iterator *iter) { }
+
+#ifdef CONFIG_FUNCTION_TRACER
+static void wakeup_print_header(struct seq_file *s)
+{
+	trace_default_header(s);
+}
+#else
+static void wakeup_print_header(struct seq_file *s)
+{
+	trace_latency_header(s);
+}
+#endif /* CONFIG_FUNCTION_TRACER */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 /*