author    Linus Torvalds <torvalds@linux-foundation.org>  2011-05-19 20:36:08 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-05-19 20:36:08 -0400
commit    df48d8716eab9608fe93924e4ae06ff110e8674f (patch)
tree      0fe10733a414b3651e1dae29518b7960a4da0aa4 /kernel
parent    acd30250d7d0f495685d1c7c6184636a22fcdf7f (diff)
parent    29510ec3b626c86de9707bb8904ff940d430289b (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (107 commits)
perf stat: Add more cache-miss percentage printouts
perf stat: Add -d -d and -d -d -d options to show more CPU events
ftrace/kbuild: Add recordmcount files to force full build
ftrace: Add self-tests for multiple function trace users
ftrace: Modify ftrace_set_filter/notrace to take ops
ftrace: Allow dynamically allocated function tracers
ftrace: Implement separate user function filtering
ftrace: Free hash with call_rcu_sched()
ftrace: Have global_ops store the functions that are to be traced
ftrace: Add ops parameter to ftrace_startup/shutdown functions
ftrace: Add enabled_functions file
ftrace: Use counters to enable functions to trace
ftrace: Separate hash allocation and assignment
ftrace: Create a global_ops to hold the filter and notrace hashes
ftrace: Use hash instead for FTRACE_FL_FILTER
ftrace: Replace FTRACE_FL_NOTRACE flag with a hash of ignored functions
perf bench, x86: Add alternatives-asm.h wrapper
x86, 64-bit: Fix copy_[to/from]_user() checks for the userspace address limit
x86, mem: memset_64.S: Optimize memset by enhanced REP MOVSB/STOSB
x86, mem: memmove_64.S: Optimize memmove by enhanced REP MOVSB/STOSB
...
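Several of the ftrace commits above replace the single global function filter with per-ops hashes, so each registered callback can trace its own set of functions (see the kernel/trace/ftrace.c diff below). For orientation, a minimal sketch of a caller under that model; `my_ops`/`my_trace_func` are hypothetical names, and only the callback signature and `struct ftrace_ops` come from the diff itself:

```c
/* Hypothetical sketch of a function-trace user after this merge:
 * each ftrace_ops now carries its own filter_hash/notrace_hash,
 * so this callback only fires for functions this ops selected. */
static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* ip is the traced function, parent_ip its call site */
}

static struct ftrace_ops my_ops = {
	.func = my_trace_func,
};
```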
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                                                        6
-rw-r--r--  kernel/events/Makefile                                                 6
-rw-r--r--  kernel/events/core.c (renamed from kernel/perf_event.c)               44
-rw-r--r--  kernel/events/hw_breakpoint.c (renamed from kernel/hw_breakpoint.c)    0
-rw-r--r--  kernel/extable.c                                                       8
-rw-r--r--  kernel/jump_label.c                                                  539
-rw-r--r--  kernel/trace/ftrace.c                                               1261
-rw-r--r--  kernel/trace/trace.c                                                  15
-rw-r--r--  kernel/trace/trace.h                                                   2
-rw-r--r--  kernel/trace/trace_functions.c                                         2
-rw-r--r--  kernel/trace/trace_irqsoff.c                                           1
-rw-r--r--  kernel/trace/trace_output.c                                            3
-rw-r--r--  kernel/trace/trace_printk.c                                          120
-rw-r--r--  kernel/trace/trace_sched_wakeup.c                                      1
-rw-r--r--  kernel/trace/trace_selftest.c                                        214
-rw-r--r--  kernel/trace/trace_selftest_dynamic.c                                  6
-rw-r--r--  kernel/trace/trace_stack.c                                             1
-rw-r--r--  kernel/tracepoint.c                                                   23
18 files changed, 1570 insertions, 682 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 85cbfb31e73e..e9cf19155b46 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -21,7 +21,6 @@ CFLAGS_REMOVE_mutex-debug.o = -pg
 CFLAGS_REMOVE_rtmutex-debug.o = -pg
 CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
-CFLAGS_REMOVE_perf_event.o = -pg
 CFLAGS_REMOVE_irq_work.o = -pg
 endif
 
@@ -103,8 +102,9 @@ obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
-obj-$(CONFIG_PERF_EVENTS) += perf_event.o
-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+
+obj-$(CONFIG_PERF_EVENTS) += events/
+
 obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
 obj-$(CONFIG_PADATA) += padata.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
diff --git a/kernel/events/Makefile b/kernel/events/Makefile
new file mode 100644
index 000000000000..1ce23d3d8394
--- /dev/null
+++ b/kernel/events/Makefile
@@ -0,0 +1,6 @@
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_core.o = -pg
+endif
+
+obj-y := core.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
diff --git a/kernel/perf_event.c b/kernel/events/core.c
index 8e81a9860a0d..0fc34a370ba4 100644
--- a/kernel/perf_event.c
+++ b/kernel/events/core.c
@@ -2,8 +2,8 @@
  * Performance events core code:
  *
  * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
- * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  *
  * For licensing details see kernel-base/COPYING
@@ -39,10 +39,10 @@
 #include <asm/irq_regs.h>
 
 struct remote_function_call {
-	struct task_struct *p;
-	int (*func)(void *info);
-	void *info;
-	int ret;
+	struct task_struct	*p;
+	int			(*func)(void *info);
+	void			*info;
+	int			ret;
 };
 
 static void remote_function(void *data)
@@ -76,10 +76,10 @@ static int
 task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
 {
 	struct remote_function_call data = {
-		.p = p,
-		.func = func,
-		.info = info,
-		.ret = -ESRCH, /* No such (running) process */
+		.p	= p,
+		.func	= func,
+		.info	= info,
+		.ret	= -ESRCH, /* No such (running) process */
 	};
 
 	if (task_curr(p))
@@ -100,10 +100,10 @@ task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
 static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
 {
 	struct remote_function_call data = {
-		.p = NULL,
-		.func = func,
-		.info = info,
-		.ret = -ENXIO, /* No such CPU */
+		.p	= NULL,
+		.func	= func,
+		.info	= info,
+		.ret	= -ENXIO, /* No such CPU */
 	};
 
 	smp_call_function_single(cpu, remote_function, &data, 1);
@@ -125,7 +125,7 @@ enum event_type_t {
  * perf_sched_events : >0 events exist
  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
  */
-atomic_t perf_sched_events __read_mostly;
+struct jump_label_key perf_sched_events __read_mostly;
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 
 static atomic_t nr_mmap_events __read_mostly;
@@ -5429,7 +5429,7 @@ fail:
 	return err;
 }
 
-atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 static void sw_perf_event_destroy(struct perf_event *event)
 {
@@ -7445,11 +7445,11 @@ static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
 }
 
 struct cgroup_subsys perf_subsys = {
-	.name = "perf_event",
-	.subsys_id = perf_subsys_id,
-	.create = perf_cgroup_create,
-	.destroy = perf_cgroup_destroy,
-	.exit = perf_cgroup_exit,
-	.attach = perf_cgroup_attach,
+	.name		= "perf_event",
+	.subsys_id	= perf_subsys_id,
+	.create		= perf_cgroup_create,
+	.destroy	= perf_cgroup_destroy,
+	.exit		= perf_cgroup_exit,
+	.attach		= perf_cgroup_attach,
 };
 #endif /* CONFIG_CGROUP_PERF */
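The two one-line hunks above (perf_sched_events, perf_swevent_enabled) swap an atomic_t counter for a struct jump_label_key: the enabled check on the event fast path can then be patched in and out as a jump label instead of being an atomic read. Roughly, as a sketch — `static_branch()` is the test-site interface this series pairs with these keys, and the surrounding names are illustrative only:

```c
/* Illustrative fast path (not taken from this diff): with a
 * jump_label_key the disabled case costs a patched-in nop,
 * not an atomic_read() on every software event. */
if (static_branch(&perf_swevent_enabled[event_id]))
	do_swevent(event_id);	/* hypothetical slow path */
```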
diff --git a/kernel/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 086adf25a55e..086adf25a55e 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
diff --git a/kernel/extable.c b/kernel/extable.c
index 7f8f263f8524..c2d625fcda77 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -72,6 +72,14 @@ int core_kernel_text(unsigned long addr)
 	return 0;
 }
 
+int core_kernel_data(unsigned long addr)
+{
+	if (addr >= (unsigned long)_sdata &&
+	    addr < (unsigned long)_edata)
+		return 1;
+	return 0;
+}
+
 int __kernel_text_address(unsigned long addr)
 {
 	if (core_kernel_text(addr))
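The new core_kernel_data() helper reports whether an address falls inside the kernel's static data section (_sdata.._edata). The ftrace.c diff further down uses it for exactly this, in the __register_ftrace_function() hunk: ops structures living outside core kernel data must be treated as dynamic, since they may be freed after unregistering:

```c
/* From the __register_ftrace_function() hunk below: anything not in
 * core kernel .data is flagged dynamic, so unregister knows it must
 * wait (synchronize_sched()) before the ops memory can be reused. */
if (!core_kernel_data((unsigned long)ops))
	ops->flags |= FTRACE_OPS_FL_DYNAMIC;
```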
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 3b79bd938330..74d1c099fbd1 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -2,43 +2,23 @@
  * jump label support
  *
  * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
+ * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
  *
  */
-#include <linux/jump_label.h>
 #include <linux/memory.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/list.h>
-#include <linux/jhash.h>
 #include <linux/slab.h>
 #include <linux/sort.h>
 #include <linux/err.h>
+#include <linux/jump_label.h>
 
 #ifdef HAVE_JUMP_LABEL
 
-#define JUMP_LABEL_HASH_BITS 6
-#define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS)
-static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE];
-
 /* mutex to protect coming/going of the the jump_label table */
 static DEFINE_MUTEX(jump_label_mutex);
 
-struct jump_label_entry {
-	struct hlist_node hlist;
-	struct jump_entry *table;
-	int nr_entries;
-	/* hang modules off here */
-	struct hlist_head modules;
-	unsigned long key;
-};
-
-struct jump_label_module_entry {
-	struct hlist_node hlist;
-	struct jump_entry *table;
-	int nr_entries;
-	struct module *mod;
-};
-
 void jump_label_lock(void)
 {
 	mutex_lock(&jump_label_mutex);
@@ -49,6 +29,11 @@ void jump_label_unlock(void)
 	mutex_unlock(&jump_label_mutex);
 }
 
+bool jump_label_enabled(struct jump_label_key *key)
+{
+	return !!atomic_read(&key->enabled);
+}
+
 static int jump_label_cmp(const void *a, const void *b)
 {
 	const struct jump_entry *jea = a;
@@ -64,7 +49,7 @@ static int jump_label_cmp(const void *a, const void *b)
 }
 
 static void
-sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop)
+jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
 {
 	unsigned long size;
 
@@ -73,118 +58,25 @@ sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop)
 	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
 }
 
-static struct jump_label_entry *get_jump_label_entry(jump_label_t key)
-{
-	struct hlist_head *head;
-	struct hlist_node *node;
-	struct jump_label_entry *e;
-	u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0);
-
-	head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
-	hlist_for_each_entry(e, node, head, hlist) {
-		if (key == e->key)
-			return e;
-	}
-	return NULL;
-}
+static void jump_label_update(struct jump_label_key *key, int enable);
 
-static struct jump_label_entry *
-add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table)
+void jump_label_inc(struct jump_label_key *key)
 {
-	struct hlist_head *head;
-	struct jump_label_entry *e;
-	u32 hash;
-
-	e = get_jump_label_entry(key);
-	if (e)
-		return ERR_PTR(-EEXIST);
-
-	e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL);
-	if (!e)
-		return ERR_PTR(-ENOMEM);
-
-	hash = jhash((void *)&key, sizeof(jump_label_t), 0);
-	head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
-	e->key = key;
-	e->table = table;
-	e->nr_entries = nr_entries;
-	INIT_HLIST_HEAD(&(e->modules));
-	hlist_add_head(&e->hlist, head);
-	return e;
-}
+	if (atomic_inc_not_zero(&key->enabled))
+		return;
 
-static int
-build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop)
-{
-	struct jump_entry *iter, *iter_begin;
-	struct jump_label_entry *entry;
-	int count;
-
-	sort_jump_label_entries(start, stop);
-	iter = start;
-	while (iter < stop) {
-		entry = get_jump_label_entry(iter->key);
-		if (!entry) {
-			iter_begin = iter;
-			count = 0;
-			while ((iter < stop) &&
-				(iter->key == iter_begin->key)) {
-				iter++;
-				count++;
-			}
-			entry = add_jump_label_entry(iter_begin->key,
-						     count, iter_begin);
-			if (IS_ERR(entry))
-				return PTR_ERR(entry);
-		} else {
-			WARN_ONCE(1, KERN_ERR "build_jump_hashtable: unexpected entry!\n");
-			return -1;
-		}
-	}
-	return 0;
+	jump_label_lock();
+	if (atomic_add_return(1, &key->enabled) == 1)
+		jump_label_update(key, JUMP_LABEL_ENABLE);
+	jump_label_unlock();
 }
 
-/***
- * jump_label_update - update jump label text
- * @key -  key value associated with a a jump label
- * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE
- *
- * Will enable/disable the jump for jump label @key, depending on the
- * value of @type.
- *
- */
-
-void jump_label_update(unsigned long key, enum jump_label_type type)
+void jump_label_dec(struct jump_label_key *key)
 {
-	struct jump_entry *iter;
-	struct jump_label_entry *entry;
-	struct hlist_node *module_node;
-	struct jump_label_module_entry *e_module;
-	int count;
+	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))
+		return;
 
-	jump_label_lock();
-	entry = get_jump_label_entry((jump_label_t)key);
-	if (entry) {
-		count = entry->nr_entries;
-		iter = entry->table;
-		while (count--) {
-			if (kernel_text_address(iter->code))
-				arch_jump_label_transform(iter, type);
-			iter++;
-		}
-		/* eanble/disable jump labels in modules */
-		hlist_for_each_entry(e_module, module_node, &(entry->modules),
-				     hlist) {
-			count = e_module->nr_entries;
-			iter = e_module->table;
-			while (count--) {
-				if (iter->key &&
-				    kernel_text_address(iter->code))
-					arch_jump_label_transform(iter, type);
-				iter++;
-			}
-		}
-	}
+	jump_label_update(key, JUMP_LABEL_DISABLE);
 	jump_label_unlock();
 }
 
@@ -197,77 +89,33 @@ static int addr_conflict(struct jump_entry *entry, void *start, void *end)
 	return 0;
 }
 
-#ifdef CONFIG_MODULES
-
-static int module_conflict(void *start, void *end)
+static int __jump_label_text_reserved(struct jump_entry *iter_start,
+		struct jump_entry *iter_stop, void *start, void *end)
 {
-	struct hlist_head *head;
-	struct hlist_node *node, *node_next, *module_node, *module_node_next;
-	struct jump_label_entry *e;
-	struct jump_label_module_entry *e_module;
 	struct jump_entry *iter;
-	int i, count;
-	int conflict = 0;
-
-	for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
-		head = &jump_label_table[i];
-		hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
-			hlist_for_each_entry_safe(e_module, module_node,
-						  module_node_next,
-						  &(e->modules), hlist) {
-				count = e_module->nr_entries;
-				iter = e_module->table;
-				while (count--) {
-					if (addr_conflict(iter, start, end)) {
-						conflict = 1;
-						goto out;
-					}
-					iter++;
-				}
-			}
-		}
-	}
-out:
-	return conflict;
-}
-
-#endif
-
-/***
- * jump_label_text_reserved - check if addr range is reserved
- * @start: start text addr
- * @end: end text addr
- *
- * checks if the text addr located between @start and @end
- * overlaps with any of the jump label patch addresses. Code
- * that wants to modify kernel text should first verify that
- * it does not overlap with any of the jump label addresses.
- * Caller must hold jump_label_mutex.
- *
- * returns 1 if there is an overlap, 0 otherwise
- */
-int jump_label_text_reserved(void *start, void *end)
-{
-	struct jump_entry *iter;
-	struct jump_entry *iter_start = __start___jump_table;
-	struct jump_entry *iter_stop = __start___jump_table;
-	int conflict = 0;
 
 	iter = iter_start;
 	while (iter < iter_stop) {
-		if (addr_conflict(iter, start, end)) {
-			conflict = 1;
-			goto out;
-		}
+		if (addr_conflict(iter, start, end))
+			return 1;
 		iter++;
 	}
 
-	/* now check modules */
-#ifdef CONFIG_MODULES
-	conflict = module_conflict(start, end);
-#endif
-out:
-	return conflict;
+	return 0;
+}
+
+static void __jump_label_update(struct jump_label_key *key,
+				struct jump_entry *entry, int enable)
+{
+	for (; entry->key == (jump_label_t)(unsigned long)key; entry++) {
+		/*
+		 * entry->code set to 0 invalidates module init text sections
+		 * kernel_text_address() verifies we are not in core kernel
+		 * init code, see jump_label_invalidate_module_init().
+		 */
+		if (entry->code && kernel_text_address(entry->code))
+			arch_jump_label_transform(entry, enable);
+	}
 }
 
 /*
@@ -277,142 +125,173 @@ void __weak arch_jump_label_text_poke_early(jump_label_t addr)
 {
 }
 
-static __init int init_jump_label(void)
+static __init int jump_label_init(void)
 {
-	int ret;
 	struct jump_entry *iter_start = __start___jump_table;
 	struct jump_entry *iter_stop = __stop___jump_table;
+	struct jump_label_key *key = NULL;
 	struct jump_entry *iter;
 
 	jump_label_lock();
-	ret = build_jump_label_hashtable(__start___jump_table,
-					 __stop___jump_table);
-	iter = iter_start;
-	while (iter < iter_stop) {
+	jump_label_sort_entries(iter_start, iter_stop);
+
+	for (iter = iter_start; iter < iter_stop; iter++) {
 		arch_jump_label_text_poke_early(iter->code);
-		iter++;
+		if (iter->key == (jump_label_t)(unsigned long)key)
+			continue;
+
+		key = (struct jump_label_key *)(unsigned long)iter->key;
+		atomic_set(&key->enabled, 0);
+		key->entries = iter;
+#ifdef CONFIG_MODULES
+		key->next = NULL;
+#endif
 	}
 	jump_label_unlock();
-	return ret;
+
+	return 0;
 }
-early_initcall(init_jump_label);
+early_initcall(jump_label_init);
 
 #ifdef CONFIG_MODULES
 
-static struct jump_label_module_entry *
-add_jump_label_module_entry(struct jump_label_entry *entry,
-			    struct jump_entry *iter_begin,
-			    int count, struct module *mod)
+struct jump_label_mod {
+	struct jump_label_mod *next;
+	struct jump_entry *entries;
+	struct module *mod;
+};
+
+static int __jump_label_mod_text_reserved(void *start, void *end)
+{
+	struct module *mod;
+
+	mod = __module_text_address((unsigned long)start);
+	if (!mod)
+		return 0;
+
+	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
+
+	return __jump_label_text_reserved(mod->jump_entries,
+				mod->jump_entries + mod->num_jump_entries,
+				start, end);
+}
+
+static void __jump_label_mod_update(struct jump_label_key *key, int enable)
+{
+	struct jump_label_mod *mod = key->next;
+
+	while (mod) {
+		__jump_label_update(key, mod->entries, enable);
+		mod = mod->next;
+	}
+}
+
+/***
+ * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop()
+ * @mod: module to patch
+ *
+ * Allow for run-time selection of the optimal nops. Before the module
+ * loads patch these with arch_get_jump_label_nop(), which is specified by
+ * the arch specific jump label code.
+ */
+void jump_label_apply_nops(struct module *mod)
 {
-	struct jump_label_module_entry *e;
-
-	e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL);
-	if (!e)
-		return ERR_PTR(-ENOMEM);
-	e->mod = mod;
-	e->nr_entries = count;
-	e->table = iter_begin;
-	hlist_add_head(&e->hlist, &entry->modules);
-	return e;
+	struct jump_entry *iter_start = mod->jump_entries;
+	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
+	struct jump_entry *iter;
+
+	/* if the module doesn't have jump label entries, just return */
+	if (iter_start == iter_stop)
+		return;
+
+	for (iter = iter_start; iter < iter_stop; iter++)
+		arch_jump_label_text_poke_early(iter->code);
 }
 
-static int add_jump_label_module(struct module *mod)
+static int jump_label_add_module(struct module *mod)
 {
-	struct jump_entry *iter, *iter_begin;
-	struct jump_label_entry *entry;
-	struct jump_label_module_entry *module_entry;
-	int count;
+	struct jump_entry *iter_start = mod->jump_entries;
+	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
+	struct jump_entry *iter;
+	struct jump_label_key *key = NULL;
+	struct jump_label_mod *jlm;
 
 	/* if the module doesn't have jump label entries, just return */
-	if (!mod->num_jump_entries)
+	if (iter_start == iter_stop)
 		return 0;
 
-	sort_jump_label_entries(mod->jump_entries,
-				mod->jump_entries + mod->num_jump_entries);
-	iter = mod->jump_entries;
-	while (iter < mod->jump_entries + mod->num_jump_entries) {
-		entry = get_jump_label_entry(iter->key);
-		iter_begin = iter;
-		count = 0;
-		while ((iter < mod->jump_entries + mod->num_jump_entries) &&
-			(iter->key == iter_begin->key)) {
-			iter++;
-			count++;
-		}
-		if (!entry) {
-			entry = add_jump_label_entry(iter_begin->key, 0, NULL);
-			if (IS_ERR(entry))
-				return PTR_ERR(entry);
+	jump_label_sort_entries(iter_start, iter_stop);
+
+	for (iter = iter_start; iter < iter_stop; iter++) {
+		if (iter->key == (jump_label_t)(unsigned long)key)
+			continue;
+
+		key = (struct jump_label_key *)(unsigned long)iter->key;
+
+		if (__module_address(iter->key) == mod) {
+			atomic_set(&key->enabled, 0);
+			key->entries = iter;
+			key->next = NULL;
+			continue;
 		}
-		module_entry = add_jump_label_module_entry(entry, iter_begin,
-							   count, mod);
-		if (IS_ERR(module_entry))
-			return PTR_ERR(module_entry);
+
+		jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL);
+		if (!jlm)
+			return -ENOMEM;
+
+		jlm->mod = mod;
+		jlm->entries = iter;
+		jlm->next = key->next;
+		key->next = jlm;
+
+		if (jump_label_enabled(key))
+			__jump_label_update(key, iter, JUMP_LABEL_ENABLE);
 	}
+
 	return 0;
 }
 
-static void remove_jump_label_module(struct module *mod)
+static void jump_label_del_module(struct module *mod)
 {
-	struct hlist_head *head;
-	struct hlist_node *node, *node_next, *module_node, *module_node_next;
-	struct jump_label_entry *e;
-	struct jump_label_module_entry *e_module;
-	int i;
+	struct jump_entry *iter_start = mod->jump_entries;
+	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
+	struct jump_entry *iter;
+	struct jump_label_key *key = NULL;
+	struct jump_label_mod *jlm, **prev;
 
-	/* if the module doesn't have jump label entries, just return */
-	if (!mod->num_jump_entries)
-		return;
+	for (iter = iter_start; iter < iter_stop; iter++) {
+		if (iter->key == (jump_label_t)(unsigned long)key)
+			continue;
+
+		key = (struct jump_label_key *)(unsigned long)iter->key;
+
+		if (__module_address(iter->key) == mod)
+			continue;
+
+		prev = &key->next;
+		jlm = key->next;
 
-	for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
-		head = &jump_label_table[i];
-		hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
-			hlist_for_each_entry_safe(e_module, module_node,
-						  module_node_next,
-						  &(e->modules), hlist) {
-				if (e_module->mod == mod) {
-					hlist_del(&e_module->hlist);
-					kfree(e_module);
-				}
-			}
-			if (hlist_empty(&e->modules) && (e->nr_entries == 0)) {
-				hlist_del(&e->hlist);
-				kfree(e);
-			}
+		while (jlm && jlm->mod != mod) {
+			prev = &jlm->next;
+			jlm = jlm->next;
+		}
+
+		if (jlm) {
+			*prev = jlm->next;
+			kfree(jlm);
 		}
 	}
 }
 
-static void remove_jump_label_module_init(struct module *mod)
+static void jump_label_invalidate_module_init(struct module *mod)
 {
-	struct hlist_head *head;
-	struct hlist_node *node, *node_next, *module_node, *module_node_next;
-	struct jump_label_entry *e;
-	struct jump_label_module_entry *e_module;
+	struct jump_entry *iter_start = mod->jump_entries;
+	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
 	struct jump_entry *iter;
-	int i, count;
-
-	/* if the module doesn't have jump label entries, just return */
-	if (!mod->num_jump_entries)
-		return;
 
-	for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
-		head = &jump_label_table[i];
-		hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
-			hlist_for_each_entry_safe(e_module, module_node,
-						  module_node_next,
-						  &(e->modules), hlist) {
-				if (e_module->mod != mod)
-					continue;
-				count = e_module->nr_entries;
-				iter = e_module->table;
-				while (count--) {
-					if (within_module_init(iter->code, mod))
-						iter->key = 0;
-					iter++;
-				}
-			}
-		}
+	for (iter = iter_start; iter < iter_stop; iter++) {
+		if (within_module_init(iter->code, mod))
+			iter->code = 0;
 	}
 }
 
@@ -426,59 +305,77 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
 	switch (val) {
 	case MODULE_STATE_COMING:
 		jump_label_lock();
-		ret = add_jump_label_module(mod);
+		ret = jump_label_add_module(mod);
 		if (ret)
-			remove_jump_label_module(mod);
+			jump_label_del_module(mod);
 		jump_label_unlock();
 		break;
 	case MODULE_STATE_GOING:
 		jump_label_lock();
-		remove_jump_label_module(mod);
+		jump_label_del_module(mod);
 		jump_label_unlock();
 		break;
 	case MODULE_STATE_LIVE:
 		jump_label_lock();
-		remove_jump_label_module_init(mod);
+		jump_label_invalidate_module_init(mod);
 		jump_label_unlock();
 		break;
 	}
-	return ret;
-}
 
-/***
- * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop()
- * @mod: module to patch
- *
- * Allow for run-time selection of the optimal nops. Before the module
- * loads patch these with arch_get_jump_label_nop(), which is specified by
- * the arch specific jump label code.
- */
-void jump_label_apply_nops(struct module *mod)
-{
-	struct jump_entry *iter;
-
-	/* if the module doesn't have jump label entries, just return */
-	if (!mod->num_jump_entries)
-		return;
-
-	iter = mod->jump_entries;
-	while (iter < mod->jump_entries + mod->num_jump_entries) {
-		arch_jump_label_text_poke_early(iter->code);
-		iter++;
-	}
+	return notifier_from_errno(ret);
 }
 
 struct notifier_block jump_label_module_nb = {
 	.notifier_call = jump_label_module_notify,
-	.priority = 0,
+	.priority = 1, /* higher than tracepoints */
 };
 
-static __init int init_jump_label_module(void)
+static __init int jump_label_init_module(void)
 {
 	return register_module_notifier(&jump_label_module_nb);
 }
-early_initcall(init_jump_label_module);
+early_initcall(jump_label_init_module);
 
 #endif /* CONFIG_MODULES */
 
+/***
+ * jump_label_text_reserved - check if addr range is reserved
+ * @start: start text addr
+ * @end: end text addr
+ *
+ * checks if the text addr located between @start and @end
+ * overlaps with any of the jump label patch addresses. Code
+ * that wants to modify kernel text should first verify that
+ * it does not overlap with any of the jump label addresses.
+ * Caller must hold jump_label_mutex.
+ *
+ * returns 1 if there is an overlap, 0 otherwise
+ */
+int jump_label_text_reserved(void *start, void *end)
+{
+	int ret = __jump_label_text_reserved(__start___jump_table,
+			__stop___jump_table, start, end);
+
+	if (ret)
+		return ret;
+
+#ifdef CONFIG_MODULES
+	ret = __jump_label_mod_text_reserved(start, end);
+#endif
+	return ret;
+}
+
+static void jump_label_update(struct jump_label_key *key, int enable)
+{
+	struct jump_entry *entry = key->entries;
+
+	/* if there are no users, entry can be NULL */
+	if (entry)
+		__jump_label_update(key, entry, enable);
+
+#ifdef CONFIG_MODULES
+	__jump_label_mod_update(key, enable);
+#endif
+}
+
 #endif
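The rewrite above drops the jhash lookup table entirely: each struct jump_label_key now carries its own refcount (enabled) plus a pointer to its sorted, contiguous block of jump_entry records, and jump_label_inc()/jump_label_dec() patch the code sites on the 0->1 and 1->0 transitions. A usage sketch — `static_branch()` as the test-site primitive is an assumption from the wider series, and `my_key`/`slow_path` are placeholders:

```c
static struct jump_label_key my_key;	/* enabled == 0: branch is off */

static void hot_path(void)
{
	/* compiled as a nop while disabled, patched to a jump when on */
	if (static_branch(&my_key))	/* assumed test-site interface */
		slow_path();		/* hypothetical */
}

static void enable_feature(void)
{
	jump_label_inc(&my_key);	/* 0 -> 1 patches every site */
}

static void disable_feature(void)
{
	jump_label_dec(&my_key);	/* 1 -> 0 patches them back */
}
```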
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index ee24fa1935ac..d017c2c82c44 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -39,20 +39,26 @@ | |||
| 39 | #include "trace_stat.h" | 39 | #include "trace_stat.h" |
| 40 | 40 | ||
| 41 | #define FTRACE_WARN_ON(cond) \ | 41 | #define FTRACE_WARN_ON(cond) \ |
| 42 | do { \ | 42 | ({ \ |
| 43 | if (WARN_ON(cond)) \ | 43 | int ___r = cond; \ |
| 44 | if (WARN_ON(___r)) \ | ||
| 44 | ftrace_kill(); \ | 45 | ftrace_kill(); \ |
| 45 | } while (0) | 46 | ___r; \ |
| 47 | }) | ||
| 46 | 48 | ||
| 47 | #define FTRACE_WARN_ON_ONCE(cond) \ | 49 | #define FTRACE_WARN_ON_ONCE(cond) \ |
| 48 | do { \ | 50 | ({ \ |
| 49 | if (WARN_ON_ONCE(cond)) \ | 51 | int ___r = cond; \ |
| 52 | if (WARN_ON_ONCE(___r)) \ | ||
| 50 | ftrace_kill(); \ | 53 | ftrace_kill(); \ |
| 51 | } while (0) | 54 | ___r; \ |
| 55 | }) | ||
| 52 | 56 | ||
| 53 | /* hash bits for specific function selection */ | 57 | /* hash bits for specific function selection */ |
| 54 | #define FTRACE_HASH_BITS 7 | 58 | #define FTRACE_HASH_BITS 7 |
| 55 | #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) | 59 | #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) |
| 60 | #define FTRACE_HASH_DEFAULT_BITS 10 | ||
| 61 | #define FTRACE_HASH_MAX_BITS 12 | ||
| 56 | 62 | ||
| 57 | /* ftrace_enabled is a method to turn ftrace on or off */ | 63 | /* ftrace_enabled is a method to turn ftrace on or off */ |
| 58 | int ftrace_enabled __read_mostly; | 64 | int ftrace_enabled __read_mostly; |
| @@ -81,23 +87,29 @@ static struct ftrace_ops ftrace_list_end __read_mostly = | |||
| 81 | .func = ftrace_stub, | 87 | .func = ftrace_stub, |
| 82 | }; | 88 | }; |
| 83 | 89 | ||
| 84 | static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; | 90 | static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end; |
| 91 | static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; | ||
| 85 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; | 92 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; |
| 86 | ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; | 93 | ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; |
| 87 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; | 94 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; |
| 95 | static struct ftrace_ops global_ops; | ||
| 96 | |||
| 97 | static void | ||
| 98 | ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip); | ||
| 88 | 99 | ||
| 89 | /* | 100 | /* |
| 90 | * Traverse the ftrace_list, invoking all entries. The reason that we | 101 | * Traverse the ftrace_global_list, invoking all entries. The reason that we |
| 91 | * can use rcu_dereference_raw() is that elements removed from this list | 102 | * can use rcu_dereference_raw() is that elements removed from this list |
| 92 | * are simply leaked, so there is no need to interact with a grace-period | 103 | * are simply leaked, so there is no need to interact with a grace-period |
| 93 | * mechanism. The rcu_dereference_raw() calls are needed to handle | 104 | * mechanism. The rcu_dereference_raw() calls are needed to handle |
| 94 | * concurrent insertions into the ftrace_list. | 105 | * concurrent insertions into the ftrace_global_list. |
| 95 | * | 106 | * |
| 96 | * Silly Alpha and silly pointer-speculation compiler optimizations! | 107 | * Silly Alpha and silly pointer-speculation compiler optimizations! |
| 97 | */ | 108 | */ |
| 98 | static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) | 109 | static void ftrace_global_list_func(unsigned long ip, |
| 110 | unsigned long parent_ip) | ||
| 99 | { | 111 | { |
| 100 | struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/ | 112 | struct ftrace_ops *op = rcu_dereference_raw(ftrace_global_list); /*see above*/ |
| 101 | 113 | ||
| 102 | while (op != &ftrace_list_end) { | 114 | while (op != &ftrace_list_end) { |
| 103 | op->func(ip, parent_ip); | 115 | op->func(ip, parent_ip); |
| @@ -147,46 +159,69 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) | |||
| 147 | } | 159 | } |
| 148 | #endif | 160 | #endif |
| 149 | 161 | ||
| 150 | static int __register_ftrace_function(struct ftrace_ops *ops) | 162 | static void update_global_ops(void) |
| 151 | { | 163 | { |
| 152 | ops->next = ftrace_list; | 164 | ftrace_func_t func; |
| 165 | |||
| 153 | /* | 166 | /* |
| 154 | * We are entering ops into the ftrace_list but another | 167 | * If there's only one function registered, then call that |
| 155 | * CPU might be walking that list. We need to make sure | 168 | * function directly. Otherwise, we need to iterate over the |
| 156 | * the ops->next pointer is valid before another CPU sees | 169 | * registered callers. |
| 157 | * the ops pointer included into the ftrace_list. | ||
| 158 | */ | 170 | */ |
| 159 | rcu_assign_pointer(ftrace_list, ops); | 171 | if (ftrace_global_list == &ftrace_list_end || |
| 172 | ftrace_global_list->next == &ftrace_list_end) | ||
| 173 | func = ftrace_global_list->func; | ||
| 174 | else | ||
| 175 | func = ftrace_global_list_func; | ||
| 160 | 176 | ||
| 161 | if (ftrace_enabled) { | 177 | /* If we filter on pids, update to use the pid function */ |
| 162 | ftrace_func_t func; | 178 | if (!list_empty(&ftrace_pids)) { |
| 179 | set_ftrace_pid_function(func); | ||
| 180 | func = ftrace_pid_func; | ||
| 181 | } | ||
| 163 | 182 | ||
| 164 | if (ops->next == &ftrace_list_end) | 183 | global_ops.func = func; |
| 165 | func = ops->func; | 184 | } |
| 166 | else | ||
| 167 | func = ftrace_list_func; | ||
| 168 | 185 | ||
| 169 | if (!list_empty(&ftrace_pids)) { | 186 | static void update_ftrace_function(void) |
| 170 | set_ftrace_pid_function(func); | 187 | { |
| 171 | func = ftrace_pid_func; | 188 | ftrace_func_t func; |
| 172 | } | 189 | |
| 190 | update_global_ops(); | ||
| 191 | |||
| 192 | /* | ||
| 193 | * If we are at the end of the list and this ops is | ||
| 194 | * not dynamic, then have the mcount trampoline call | ||
| 195 | * the function directly | ||
| 196 | */ | ||
| 197 | if (ftrace_ops_list == &ftrace_list_end || | ||
| 198 | (ftrace_ops_list->next == &ftrace_list_end && | ||
| 199 | !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC))) | ||
| 200 | func = ftrace_ops_list->func; | ||
| 201 | else | ||
| 202 | func = ftrace_ops_list_func; | ||
| 173 | 203 | ||
| 174 | /* | ||
| 175 | * For one func, simply call it directly. | ||
| 176 | * For more than one func, call the chain. | ||
| 177 | */ | ||
| 178 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | 204 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST |
| 179 | ftrace_trace_function = func; | 205 | ftrace_trace_function = func; |
| 180 | #else | 206 | #else |
| 181 | __ftrace_trace_function = func; | 207 | __ftrace_trace_function = func; |
| 182 | ftrace_trace_function = ftrace_test_stop_func; | 208 | ftrace_trace_function = ftrace_test_stop_func; |
| 183 | #endif | 209 | #endif |
| 184 | } | 210 | } |
| 185 | 211 | ||
| 186 | return 0; | 212 | static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) |
| 213 | { | ||
| 214 | ops->next = *list; | ||
| 215 | /* | ||
| 216 | * We are entering ops into the list but another | ||
| 217 | * CPU might be walking that list. We need to make sure | ||
| 218 | * the ops->next pointer is valid before another CPU sees | ||
| 219 | * the ops pointer included into the list. | ||
| 220 | */ | ||
| 221 | rcu_assign_pointer(*list, ops); | ||
| 187 | } | 222 | } |
| 188 | 223 | ||
| 189 | static int __unregister_ftrace_function(struct ftrace_ops *ops) | 224 | static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) |
| 190 | { | 225 | { |
| 191 | struct ftrace_ops **p; | 226 | struct ftrace_ops **p; |
| 192 | 227 | ||
| @@ -194,13 +229,12 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
| 194 | * If we are removing the last function, then simply point | 229 | * If we are removing the last function, then simply point |
| 195 | * to the ftrace_stub. | 230 | * to the ftrace_stub. |
| 196 | */ | 231 | */ |
| 197 | if (ftrace_list == ops && ops->next == &ftrace_list_end) { | 232 | if (*list == ops && ops->next == &ftrace_list_end) { |
| 198 | ftrace_trace_function = ftrace_stub; | 233 | *list = &ftrace_list_end; |
| 199 | ftrace_list = &ftrace_list_end; | ||
| 200 | return 0; | 234 | return 0; |
| 201 | } | 235 | } |
| 202 | 236 | ||
| 203 | for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next) | 237 | for (p = list; *p != &ftrace_list_end; p = &(*p)->next) |
| 204 | if (*p == ops) | 238 | if (*p == ops) |
| 205 | break; | 239 | break; |
| 206 | 240 | ||
| @@ -208,53 +242,83 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
| 208 | return -1; | 242 | return -1; |
| 209 | 243 | ||
| 210 | *p = (*p)->next; | 244 | *p = (*p)->next; |
| 245 | return 0; | ||
| 246 | } | ||
| 211 | 247 | ||
| 212 | if (ftrace_enabled) { | 248 | static int __register_ftrace_function(struct ftrace_ops *ops) |
| 213 | /* If we only have one func left, then call that directly */ | 249 | { |
| 214 | if (ftrace_list->next == &ftrace_list_end) { | 250 | if (ftrace_disabled) |
| 215 | ftrace_func_t func = ftrace_list->func; | 251 | return -ENODEV; |
| 216 | 252 | ||
| 217 | if (!list_empty(&ftrace_pids)) { | 253 | if (FTRACE_WARN_ON(ops == &global_ops)) |
| 218 | set_ftrace_pid_function(func); | 254 | return -EINVAL; |
| 219 | func = ftrace_pid_func; | 255 | |
| 220 | } | 256 | if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED)) |
| 221 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | 257 | return -EBUSY; |
| 222 | ftrace_trace_function = func; | 258 | |
| 223 | #else | 259 | if (!core_kernel_data((unsigned long)ops)) |
| 224 | __ftrace_trace_function = func; | 260 | ops->flags |= FTRACE_OPS_FL_DYNAMIC; |
| 225 | #endif | 261 | |
| 226 | } | 262 | if (ops->flags & FTRACE_OPS_FL_GLOBAL) { |
| 227 | } | 263 | int first = ftrace_global_list == &ftrace_list_end; |
| 264 | add_ftrace_ops(&ftrace_global_list, ops); | ||
| 265 | ops->flags |= FTRACE_OPS_FL_ENABLED; | ||
| 266 | if (first) | ||
| 267 | add_ftrace_ops(&ftrace_ops_list, &global_ops); | ||
| 268 | } else | ||
| 269 | add_ftrace_ops(&ftrace_ops_list, ops); | ||
| 270 | |||
| 271 | if (ftrace_enabled) | ||
| 272 | update_ftrace_function(); | ||
| 228 | 273 | ||
| 229 | return 0; | 274 | return 0; |
| 230 | } | 275 | } |
| 231 | 276 | ||
| 232 | static void ftrace_update_pid_func(void) | 277 | static int __unregister_ftrace_function(struct ftrace_ops *ops) |
| 233 | { | 278 | { |
| 234 | ftrace_func_t func; | 279 | int ret; |
| 235 | 280 | ||
| 236 | if (ftrace_trace_function == ftrace_stub) | 281 | if (ftrace_disabled) |
| 237 | return; | 282 | return -ENODEV; |
| 238 | 283 | ||
| 239 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | 284 | if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) |
| 240 | func = ftrace_trace_function; | 285 | return -EBUSY; |
| 241 | #else | ||
| 242 | func = __ftrace_trace_function; | ||
| 243 | #endif | ||
| 244 | 286 | ||
| 245 | if (!list_empty(&ftrace_pids)) { | 287 | if (FTRACE_WARN_ON(ops == &global_ops)) |
| 246 | set_ftrace_pid_function(func); | 288 | return -EINVAL; |
| 247 | func = ftrace_pid_func; | ||
| 248 | } else { | ||
| 249 | if (func == ftrace_pid_func) | ||
| 250 | func = ftrace_pid_function; | ||
| 251 | } | ||
| 252 | 289 | ||
| 253 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | 290 | if (ops->flags & FTRACE_OPS_FL_GLOBAL) { |
| 254 | ftrace_trace_function = func; | 291 | ret = remove_ftrace_ops(&ftrace_global_list, ops); |
| 255 | #else | 292 | if (!ret && ftrace_global_list == &ftrace_list_end) |
| 256 | __ftrace_trace_function = func; | 293 | ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops); |
| 257 | #endif | 294 | if (!ret) |
| 295 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; | ||
| 296 | } else | ||
| 297 | ret = remove_ftrace_ops(&ftrace_ops_list, ops); | ||
| 298 | |||
| 299 | if (ret < 0) | ||
| 300 | return ret; | ||
| 301 | |||
| 302 | if (ftrace_enabled) | ||
| 303 | update_ftrace_function(); | ||
| 304 | |||
| 305 | /* | ||
| 306 | * Dynamic ops may be freed, we must make sure that all | ||
| 307 | * callers are done before leaving this function. | ||
| 308 | */ | ||
| 309 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC) | ||
| 310 | synchronize_sched(); | ||
| 311 | |||
| 312 | return 0; | ||
| 313 | } | ||
| 314 | |||
| 315 | static void ftrace_update_pid_func(void) | ||
| 316 | { | ||
| 317 | /* Only do something if we are tracing something */ | ||
| 318 | if (ftrace_trace_function == ftrace_stub) | ||
| 319 | return; | ||
| 320 | |||
| 321 | update_ftrace_function(); | ||
| 258 | } | 322 | } |
| 259 | 323 | ||
| 260 | #ifdef CONFIG_FUNCTION_PROFILER | 324 | #ifdef CONFIG_FUNCTION_PROFILER |
| @@ -888,8 +952,35 @@ enum { | |||
| 888 | FTRACE_START_FUNC_RET = (1 << 3), | 952 | FTRACE_START_FUNC_RET = (1 << 3), |
| 889 | FTRACE_STOP_FUNC_RET = (1 << 4), | 953 | FTRACE_STOP_FUNC_RET = (1 << 4), |
| 890 | }; | 954 | }; |
| 955 | struct ftrace_func_entry { | ||
| 956 | struct hlist_node hlist; | ||
| 957 | unsigned long ip; | ||
| 958 | }; | ||
| 891 | 959 | ||
| 892 | static int ftrace_filtered; | 960 | struct ftrace_hash { |
| 961 | unsigned long size_bits; | ||
| 962 | struct hlist_head *buckets; | ||
| 963 | unsigned long count; | ||
| 964 | struct rcu_head rcu; | ||
| 965 | }; | ||
| 966 | |||
| 967 | /* | ||
| 968 | * We make these constant because no one should touch them, | ||
| 969 | * but they are used as the default "empty hash", to avoid allocating | ||
| 970 | * it all the time. These are in a read only section such that if | ||
| 971 | * anyone does try to modify it, it will cause an exception. | ||
| 972 | */ | ||
| 973 | static const struct hlist_head empty_buckets[1]; | ||
| 974 | static const struct ftrace_hash empty_hash = { | ||
| 975 | .buckets = (struct hlist_head *)empty_buckets, | ||
| 976 | }; | ||
| 977 | #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) | ||
| 978 | |||
| 979 | static struct ftrace_ops global_ops = { | ||
| 980 | .func = ftrace_stub, | ||
| 981 | .notrace_hash = EMPTY_HASH, | ||
| 982 | .filter_hash = EMPTY_HASH, | ||
| 983 | }; | ||
| 893 | 984 | ||
| 894 | static struct dyn_ftrace *ftrace_new_addrs; | 985 | static struct dyn_ftrace *ftrace_new_addrs; |
| 895 | 986 | ||
| @@ -912,6 +1003,269 @@ static struct ftrace_page *ftrace_pages; | |||
| 912 | 1003 | ||
| 913 | static struct dyn_ftrace *ftrace_free_records; | 1004 | static struct dyn_ftrace *ftrace_free_records; |
| 914 | 1005 | ||
| 1006 | static struct ftrace_func_entry * | ||
| 1007 | ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) | ||
| 1008 | { | ||
| 1009 | unsigned long key; | ||
| 1010 | struct ftrace_func_entry *entry; | ||
| 1011 | struct hlist_head *hhd; | ||
| 1012 | struct hlist_node *n; | ||
| 1013 | |||
| 1014 | if (!hash->count) | ||
| 1015 | return NULL; | ||
| 1016 | |||
| 1017 | if (hash->size_bits > 0) | ||
| 1018 | key = hash_long(ip, hash->size_bits); | ||
| 1019 | else | ||
| 1020 | key = 0; | ||
| 1021 | |||
| 1022 | hhd = &hash->buckets[key]; | ||
| 1023 | |||
| 1024 | hlist_for_each_entry_rcu(entry, n, hhd, hlist) { | ||
| 1025 | if (entry->ip == ip) | ||
| 1026 | return entry; | ||
| 1027 | } | ||
| 1028 | return NULL; | ||
| 1029 | } | ||
| 1030 | |||
| 1031 | static void __add_hash_entry(struct ftrace_hash *hash, | ||
| 1032 | struct ftrace_func_entry *entry) | ||
| 1033 | { | ||
| 1034 | struct hlist_head *hhd; | ||
| 1035 | unsigned long key; | ||
| 1036 | |||
| 1037 | if (hash->size_bits) | ||
| 1038 | key = hash_long(entry->ip, hash->size_bits); | ||
| 1039 | else | ||
| 1040 | key = 0; | ||
| 1041 | |||
| 1042 | hhd = &hash->buckets[key]; | ||
| 1043 | hlist_add_head(&entry->hlist, hhd); | ||
| 1044 | hash->count++; | ||
| 1045 | } | ||
| 1046 | |||
| 1047 | static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip) | ||
| 1048 | { | ||
| 1049 | struct ftrace_func_entry *entry; | ||
| 1050 | |||
| 1051 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | ||
| 1052 | if (!entry) | ||
| 1053 | return -ENOMEM; | ||
| 1054 | |||
| 1055 | entry->ip = ip; | ||
| 1056 | __add_hash_entry(hash, entry); | ||
| 1057 | |||
| 1058 | return 0; | ||
| 1059 | } | ||
| 1060 | |||
| 1061 | static void | ||
| 1062 | free_hash_entry(struct ftrace_hash *hash, | ||
| 1063 | struct ftrace_func_entry *entry) | ||
| 1064 | { | ||
| 1065 | hlist_del(&entry->hlist); | ||
| 1066 | kfree(entry); | ||
| 1067 | hash->count--; | ||
| 1068 | } | ||
| 1069 | |||
| 1070 | static void | ||
| 1071 | remove_hash_entry(struct ftrace_hash *hash, | ||
| 1072 | struct ftrace_func_entry *entry) | ||
| 1073 | { | ||
| 1074 | hlist_del(&entry->hlist); | ||
| 1075 | hash->count--; | ||
| 1076 | } | ||
| 1077 | |||
| 1078 | static void ftrace_hash_clear(struct ftrace_hash *hash) | ||
| 1079 | { | ||
| 1080 | struct hlist_head *hhd; | ||
| 1081 | struct hlist_node *tp, *tn; | ||
| 1082 | struct ftrace_func_entry *entry; | ||
| 1083 | int size = 1 << hash->size_bits; | ||
| 1084 | int i; | ||
| 1085 | |||
| 1086 | if (!hash->count) | ||
| 1087 | return; | ||
| 1088 | |||
| 1089 | for (i = 0; i < size; i++) { | ||
| 1090 | hhd = &hash->buckets[i]; | ||
| 1091 | hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) | ||
| 1092 | free_hash_entry(hash, entry); | ||
| 1093 | } | ||
| 1094 | FTRACE_WARN_ON(hash->count); | ||
| 1095 | } | ||
| 1096 | |||
| 1097 | static void free_ftrace_hash(struct ftrace_hash *hash) | ||
| 1098 | { | ||
| 1099 | if (!hash || hash == EMPTY_HASH) | ||
| 1100 | return; | ||
| 1101 | ftrace_hash_clear(hash); | ||
| 1102 | kfree(hash->buckets); | ||
| 1103 | kfree(hash); | ||
| 1104 | } | ||
| 1105 | |||
| 1106 | static void __free_ftrace_hash_rcu(struct rcu_head *rcu) | ||
| 1107 | { | ||
| 1108 | struct ftrace_hash *hash; | ||
| 1109 | |||
| 1110 | hash = container_of(rcu, struct ftrace_hash, rcu); | ||
| 1111 | free_ftrace_hash(hash); | ||
| 1112 | } | ||
| 1113 | |||
| 1114 | static void free_ftrace_hash_rcu(struct ftrace_hash *hash) | ||
| 1115 | { | ||
| 1116 | if (!hash || hash == EMPTY_HASH) | ||
| 1117 | return; | ||
| 1118 | call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu); | ||
| 1119 | } | ||
| 1120 | |||
| 1121 | static struct ftrace_hash *alloc_ftrace_hash(int size_bits) | ||
| 1122 | { | ||
| 1123 | struct ftrace_hash *hash; | ||
| 1124 | int size; | ||
| 1125 | |||
| 1126 | hash = kzalloc(sizeof(*hash), GFP_KERNEL); | ||
| 1127 | if (!hash) | ||
| 1128 | return NULL; | ||
| 1129 | |||
| 1130 | size = 1 << size_bits; | ||
| 1131 | hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL); | ||
| 1132 | |||
| 1133 | if (!hash->buckets) { | ||
| 1134 | kfree(hash); | ||
| 1135 | return NULL; | ||
| 1136 | } | ||
| 1137 | |||
| 1138 | hash->size_bits = size_bits; | ||
| 1139 | |||
| 1140 | return hash; | ||
| 1141 | } | ||
| 1142 | |||
| 1143 | static struct ftrace_hash * | ||
| 1144 | alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) | ||
| 1145 | { | ||
| 1146 | struct ftrace_func_entry *entry; | ||
| 1147 | struct ftrace_hash *new_hash; | ||
| 1148 | struct hlist_node *tp; | ||
| 1149 | int size; | ||
| 1150 | int ret; | ||
| 1151 | int i; | ||
| 1152 | |||
| 1153 | new_hash = alloc_ftrace_hash(size_bits); | ||
| 1154 | if (!new_hash) | ||
| 1155 | return NULL; | ||
| 1156 | |||
| 1157 | /* Empty hash? */ | ||
| 1158 | if (!hash || !hash->count) | ||
| 1159 | return new_hash; | ||
| 1160 | |||
| 1161 | size = 1 << hash->size_bits; | ||
| 1162 | for (i = 0; i < size; i++) { | ||
| 1163 | hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) { | ||
| 1164 | ret = add_hash_entry(new_hash, entry->ip); | ||
| 1165 | if (ret < 0) | ||
| 1166 | goto free_hash; | ||
| 1167 | } | ||
| 1168 | } | ||
| 1169 | |||
| 1170 | FTRACE_WARN_ON(new_hash->count != hash->count); | ||
| 1171 | |||
| 1172 | return new_hash; | ||
| 1173 | |||
| 1174 | free_hash: | ||
| 1175 | free_ftrace_hash(new_hash); | ||
| 1176 | return NULL; | ||
| 1177 | } | ||
| 1178 | |||
| 1179 | static int | ||
| 1180 | ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src) | ||
| 1181 | { | ||
| 1182 | struct ftrace_func_entry *entry; | ||
| 1183 | struct hlist_node *tp, *tn; | ||
| 1184 | struct hlist_head *hhd; | ||
| 1185 | struct ftrace_hash *old_hash; | ||
| 1186 | struct ftrace_hash *new_hash; | ||
| 1187 | unsigned long key; | ||
| 1188 | int size = src->count; | ||
| 1189 | int bits = 0; | ||
| 1190 | int i; | ||
| 1191 | |||
| 1192 | /* | ||
| 1193 | * If the new source is empty, just free dst and assign it | ||
| 1194 | * the empty_hash. | ||
| 1195 | */ | ||
| 1196 | if (!src->count) { | ||
| 1197 | free_ftrace_hash_rcu(*dst); | ||
| 1198 | rcu_assign_pointer(*dst, EMPTY_HASH); | ||
| 1199 | return 0; | ||
| 1200 | } | ||
| 1201 | |||
| 1202 | /* | ||
| 1203 | * Make the hash size about half the number of entries found | ||
| 1204 | */ | ||
| 1205 | for (size /= 2; size; size >>= 1) | ||
| 1206 | bits++; | ||
| 1207 | |||
| 1208 | /* Don't allocate too much */ | ||
| 1209 | if (bits > FTRACE_HASH_MAX_BITS) | ||
| 1210 | bits = FTRACE_HASH_MAX_BITS; | ||
| 1211 | |||
| 1212 | new_hash = alloc_ftrace_hash(bits); | ||
| 1213 | if (!new_hash) | ||
| 1214 | return -ENOMEM; | ||
| 1215 | |||
| 1216 | size = 1 << src->size_bits; | ||
| 1217 | for (i = 0; i < size; i++) { | ||
| 1218 | hhd = &src->buckets[i]; | ||
| 1219 | hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) { | ||
| 1220 | if (bits > 0) | ||
| 1221 | key = hash_long(entry->ip, bits); | ||
| 1222 | else | ||
| 1223 | key = 0; | ||
| 1224 | remove_hash_entry(src, entry); | ||
| 1225 | __add_hash_entry(new_hash, entry); | ||
| 1226 | } | ||
| 1227 | } | ||
| 1228 | |||
| 1229 | old_hash = *dst; | ||
| 1230 | rcu_assign_pointer(*dst, new_hash); | ||
| 1231 | free_ftrace_hash_rcu(old_hash); | ||
| 1232 | |||
| 1233 | return 0; | ||
| 1234 | } | ||
| 1235 | |||
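The sizing loop above picks the number of hash bits so the bucket count comes out at roughly half the number of entries being moved, then clamps at FTRACE_HASH_MAX_BITS. A minimal userspace sketch of the same arithmetic (standalone, not kernel code):

	#include <stdio.h>

	/* Mirrors the loop in ftrace_hash_move(): halve the entry
	 * count, then count how many right shifts empty it. */
	static int hash_bits(int count)
	{
		int bits = 0;
		int size;

		for (size = count / 2; size; size >>= 1)
			bits++;
		return bits;
	}

	int main(void)
	{
		int counts[] = { 1, 4, 10, 100, 1000 };
		unsigned int i;

		for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
			printf("%4d entries -> %d bits, %d buckets\n",
			       counts[i], hash_bits(counts[i]),
			       1 << hash_bits(counts[i]));
		return 0;
	}

For example, 10 entries yield 3 bits (8 buckets) and 1000 entries yield 9 bits (512 buckets), keeping average chain length near two.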
| 1236 | /* | ||
| 1237 | * Test the hashes for this ops to see if we want to call | ||
| 1238 | * the ops->func or not. | ||
| 1239 | * | ||
| 1240 | * It's a match if the ip is in the ops->filter_hash or | ||
| 1241 | * the filter_hash does not exist or is empty, | ||
| 1242 | * AND | ||
| 1243 | * the ip is not in the ops->notrace_hash. | ||
| 1244 | * | ||
| 1245 | * This needs to be called with preemption disabled as | ||
| 1246 | * the hashes are freed with call_rcu_sched(). | ||
| 1247 | */ | ||
| 1248 | static int | ||
| 1249 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) | ||
| 1250 | { | ||
| 1251 | struct ftrace_hash *filter_hash; | ||
| 1252 | struct ftrace_hash *notrace_hash; | ||
| 1253 | int ret; | ||
| 1254 | |||
| 1255 | filter_hash = rcu_dereference_raw(ops->filter_hash); | ||
| 1256 | notrace_hash = rcu_dereference_raw(ops->notrace_hash); | ||
| 1257 | |||
| 1258 | if ((!filter_hash || !filter_hash->count || | ||
| 1259 | ftrace_lookup_ip(filter_hash, ip)) && | ||
| 1260 | (!notrace_hash || !notrace_hash->count || | ||
| 1261 | !ftrace_lookup_ip(notrace_hash, ip))) | ||
| 1262 | ret = 1; | ||
| 1263 | else | ||
| 1264 | ret = 0; | ||
| 1265 | |||
| 1266 | return ret; | ||
| 1267 | } | ||
| 1268 | |||
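The predicate in ftrace_ops_test() reads densely; here is a standalone toy model of the same boolean logic (illustrative only; in the kernel the booleans come from ftrace_lookup_ip() on the two hashes):

	#include <stdbool.h>
	#include <stdio.h>

	/* An ip is traced iff it passes the filter hash (or the filter
	 * is missing/empty) AND it is not hit in the notrace hash. */
	static bool ops_test(bool filter_empty, bool in_filter,
			     bool notrace_empty, bool in_notrace)
	{
		return (filter_empty || in_filter) &&
		       (notrace_empty || !in_notrace);
	}

	int main(void)
	{
		/* empty filter but ip listed in notrace: not traced */
		printf("%d\n", ops_test(true, false, false, true));  /* 0 */
		/* ip in filter, empty notrace: traced */
		printf("%d\n", ops_test(false, true, true, false));  /* 1 */
		return 0;
	}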
| 915 | /* | 1269 | /* |
| 916 | * This is a double for loop. Do not use 'break' to break out of it; | 1270 | * This is a double for loop. Do not use 'break' to break out of it; |
| 917 | * you must use a goto. | 1271 | * you must use a goto. |
| @@ -926,6 +1280,105 @@ static struct dyn_ftrace *ftrace_free_records; | |||
| 926 | } \ | 1280 | } \ |
| 927 | } | 1281 | } |
| 928 | 1282 | ||
| 1283 | static void __ftrace_hash_rec_update(struct ftrace_ops *ops, | ||
| 1284 | int filter_hash, | ||
| 1285 | bool inc) | ||
| 1286 | { | ||
| 1287 | struct ftrace_hash *hash; | ||
| 1288 | struct ftrace_hash *other_hash; | ||
| 1289 | struct ftrace_page *pg; | ||
| 1290 | struct dyn_ftrace *rec; | ||
| 1291 | int count = 0; | ||
| 1292 | int all = 0; | ||
| 1293 | |||
| 1294 | /* Only update if the ops has been registered */ | ||
| 1295 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | ||
| 1296 | return; | ||
| 1297 | |||
| 1298 | /* | ||
| 1299 | * In the filter_hash case: | ||
| 1300 | * If the count is zero, we update all records. | ||
| 1301 | * Otherwise we just update the items in the hash. | ||
| 1302 | * | ||
| 1303 | * In the notrace_hash case: | ||
| 1304 | * We only update records that are in the hash. | ||
| 1305 | * Since disabling notrace means enabling tracing, | ||
| 1306 | * and enabling notrace means disabling it, the inc | ||
| 1307 | * variable is inverted. | ||
| 1308 | */ | ||
| 1309 | if (filter_hash) { | ||
| 1310 | hash = ops->filter_hash; | ||
| 1311 | other_hash = ops->notrace_hash; | ||
| 1312 | if (!hash || !hash->count) | ||
| 1313 | all = 1; | ||
| 1314 | } else { | ||
| 1315 | inc = !inc; | ||
| 1316 | hash = ops->notrace_hash; | ||
| 1317 | other_hash = ops->filter_hash; | ||
| 1318 | /* | ||
| 1319 | * If the notrace hash has no items, | ||
| 1320 | * then there's nothing to do. | ||
| 1321 | */ | ||
| 1322 | if (hash && !hash->count) | ||
| 1323 | return; | ||
| 1324 | } | ||
| 1325 | |||
| 1326 | do_for_each_ftrace_rec(pg, rec) { | ||
| 1327 | int in_other_hash = 0; | ||
| 1328 | int in_hash = 0; | ||
| 1329 | int match = 0; | ||
| 1330 | |||
| 1331 | if (all) { | ||
| 1332 | /* | ||
| 1333 | * Only the filter_hash affects all records. | ||
| 1334 | * Update if the record is not in the notrace hash. | ||
| 1335 | */ | ||
| 1336 | if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip)) | ||
| 1337 | match = 1; | ||
| 1338 | } else { | ||
| 1339 | in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip); | ||
| 1340 | in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip); | ||
| 1341 | |||
| 1342 | /* | ||
| 1343 | * filter_hash: match ips in hash but not in other_hash; notrace_hash: match ips in hash that are also in other_hash, or when other_hash is empty. | ||
| 1344 | */ | ||
| 1345 | if (filter_hash && in_hash && !in_other_hash) | ||
| 1346 | match = 1; | ||
| 1347 | else if (!filter_hash && in_hash && | ||
| 1348 | (in_other_hash || !other_hash->count)) | ||
| 1349 | match = 1; | ||
| 1350 | } | ||
| 1351 | if (!match) | ||
| 1352 | continue; | ||
| 1353 | |||
| 1354 | if (inc) { | ||
| 1355 | rec->flags++; | ||
| 1356 | if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX)) | ||
| 1357 | return; | ||
| 1358 | } else { | ||
| 1359 | if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0)) | ||
| 1360 | return; | ||
| 1361 | rec->flags--; | ||
| 1362 | } | ||
| 1363 | count++; | ||
| 1364 | /* Shortcut, if we handled all records, we are done. */ | ||
| 1365 | if (!all && count == hash->count) | ||
| 1366 | return; | ||
| 1367 | } while_for_each_ftrace_rec(); | ||
| 1368 | } | ||
| 1369 | |||
| 1370 | static void ftrace_hash_rec_disable(struct ftrace_ops *ops, | ||
| 1371 | int filter_hash) | ||
| 1372 | { | ||
| 1373 | __ftrace_hash_rec_update(ops, filter_hash, 0); | ||
| 1374 | } | ||
| 1375 | |||
| 1376 | static void ftrace_hash_rec_enable(struct ftrace_ops *ops, | ||
| 1377 | int filter_hash) | ||
| 1378 | { | ||
| 1379 | __ftrace_hash_rec_update(ops, filter_hash, 1); | ||
| 1380 | } | ||
| 1381 | |||
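Since rec->flags now doubles as a reference count (the bits outside FTRACE_FL_MASK), a quick userspace model of what the enable/disable wrappers do per record may help; this is a simplification, not kernel code:

	#include <stdio.h>

	struct rec { unsigned long refs; };

	/* Each ops whose hashes select this record bumps the count;
	 * the mcount site stays patched while the count is non-zero. */
	static void update(struct rec *r, int inc)
	{
		if (inc)
			r->refs++;
		else
			r->refs--;
		printf("refs=%lu -> %s\n", r->refs,
		       r->refs ? "traced" : "nop");
	}

	int main(void)
	{
		struct rec r = { 0 };

		update(&r, 1);	/* ops A selects the function */
		update(&r, 1);	/* ops B selects it too */
		update(&r, 0);	/* A unregisters: still traced for B */
		update(&r, 0);	/* B unregisters: patched back to a nop */
		return 0;
	}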
| 929 | static void ftrace_free_rec(struct dyn_ftrace *rec) | 1382 | static void ftrace_free_rec(struct dyn_ftrace *rec) |
| 930 | { | 1383 | { |
| 931 | rec->freelist = ftrace_free_records; | 1384 | rec->freelist = ftrace_free_records; |
| @@ -1047,18 +1500,18 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
| 1047 | ftrace_addr = (unsigned long)FTRACE_ADDR; | 1500 | ftrace_addr = (unsigned long)FTRACE_ADDR; |
| 1048 | 1501 | ||
| 1049 | /* | 1502 | /* |
| 1050 | * If this record is not to be traced or we want to disable it, | 1503 | * If we are enabling tracing: |
| 1051 | * then disable it. | 1504 | * |
| 1505 | * If the record has a ref count, then we need to enable it | ||
| 1506 | * because someone is using it. | ||
| 1052 | * | 1507 | * |
| 1053 | * If we want to enable it and filtering is off, then enable it. | 1508 | * Otherwise we make sure it's disabled. |
| 1054 | * | 1509 | * |
| 1055 | * If we want to enable it and filtering is on, enable it only if | 1510 | * If we are disabling tracing, then disable all records that |
| 1056 | * it's filtered | 1511 | * are enabled. |
| 1057 | */ | 1512 | */ |
| 1058 | if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) { | 1513 | if (enable && (rec->flags & ~FTRACE_FL_MASK)) |
| 1059 | if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER)) | 1514 | flag = FTRACE_FL_ENABLED; |
| 1060 | flag = FTRACE_FL_ENABLED; | ||
| 1061 | } | ||
| 1062 | 1515 | ||
| 1063 | /* If the state of this record hasn't changed, then do nothing */ | 1516 | /* If the state of this record hasn't changed, then do nothing */ |
| 1064 | if ((rec->flags & FTRACE_FL_ENABLED) == flag) | 1517 | if ((rec->flags & FTRACE_FL_ENABLED) == flag) |
| @@ -1079,19 +1532,16 @@ static void ftrace_replace_code(int enable) | |||
| 1079 | struct ftrace_page *pg; | 1532 | struct ftrace_page *pg; |
| 1080 | int failed; | 1533 | int failed; |
| 1081 | 1534 | ||
| 1535 | if (unlikely(ftrace_disabled)) | ||
| 1536 | return; | ||
| 1537 | |||
| 1082 | do_for_each_ftrace_rec(pg, rec) { | 1538 | do_for_each_ftrace_rec(pg, rec) { |
| 1083 | /* | 1539 | /* Skip over free records */ |
| 1084 | * Skip over free records, records that have | 1540 | if (rec->flags & FTRACE_FL_FREE) |
| 1085 | * failed and not converted. | ||
| 1086 | */ | ||
| 1087 | if (rec->flags & FTRACE_FL_FREE || | ||
| 1088 | rec->flags & FTRACE_FL_FAILED || | ||
| 1089 | !(rec->flags & FTRACE_FL_CONVERTED)) | ||
| 1090 | continue; | 1541 | continue; |
| 1091 | 1542 | ||
| 1092 | failed = __ftrace_replace_code(rec, enable); | 1543 | failed = __ftrace_replace_code(rec, enable); |
| 1093 | if (failed) { | 1544 | if (failed) { |
| 1094 | rec->flags |= FTRACE_FL_FAILED; | ||
| 1095 | ftrace_bug(failed, rec->ip); | 1545 | ftrace_bug(failed, rec->ip); |
| 1096 | /* Stop processing */ | 1546 | /* Stop processing */ |
| 1097 | return; | 1547 | return; |
| @@ -1107,10 +1557,12 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) | |||
| 1107 | 1557 | ||
| 1108 | ip = rec->ip; | 1558 | ip = rec->ip; |
| 1109 | 1559 | ||
| 1560 | if (unlikely(ftrace_disabled)) | ||
| 1561 | return 0; | ||
| 1562 | |||
| 1110 | ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); | 1563 | ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); |
| 1111 | if (ret) { | 1564 | if (ret) { |
| 1112 | ftrace_bug(ret, ip); | 1565 | ftrace_bug(ret, ip); |
| 1113 | rec->flags |= FTRACE_FL_FAILED; | ||
| 1114 | return 0; | 1566 | return 0; |
| 1115 | } | 1567 | } |
| 1116 | return 1; | 1568 | return 1; |
| @@ -1171,6 +1623,7 @@ static void ftrace_run_update_code(int command) | |||
| 1171 | 1623 | ||
| 1172 | static ftrace_func_t saved_ftrace_func; | 1624 | static ftrace_func_t saved_ftrace_func; |
| 1173 | static int ftrace_start_up; | 1625 | static int ftrace_start_up; |
| 1626 | static int global_start_up; | ||
| 1174 | 1627 | ||
| 1175 | static void ftrace_startup_enable(int command) | 1628 | static void ftrace_startup_enable(int command) |
| 1176 | { | 1629 | { |
| @@ -1185,19 +1638,36 @@ static void ftrace_startup_enable(int command) | |||
| 1185 | ftrace_run_update_code(command); | 1638 | ftrace_run_update_code(command); |
| 1186 | } | 1639 | } |
| 1187 | 1640 | ||
| 1188 | static void ftrace_startup(int command) | 1641 | static void ftrace_startup(struct ftrace_ops *ops, int command) |
| 1189 | { | 1642 | { |
| 1643 | bool hash_enable = true; | ||
| 1644 | |||
| 1190 | if (unlikely(ftrace_disabled)) | 1645 | if (unlikely(ftrace_disabled)) |
| 1191 | return; | 1646 | return; |
| 1192 | 1647 | ||
| 1193 | ftrace_start_up++; | 1648 | ftrace_start_up++; |
| 1194 | command |= FTRACE_ENABLE_CALLS; | 1649 | command |= FTRACE_ENABLE_CALLS; |
| 1195 | 1650 | ||
| 1651 | /* ops marked global share the filter hashes */ | ||
| 1652 | if (ops->flags & FTRACE_OPS_FL_GLOBAL) { | ||
| 1653 | ops = &global_ops; | ||
| 1654 | /* Don't update hash if global is already set */ | ||
| 1655 | if (global_start_up) | ||
| 1656 | hash_enable = false; | ||
| 1657 | global_start_up++; | ||
| 1658 | } | ||
| 1659 | |||
| 1660 | ops->flags |= FTRACE_OPS_FL_ENABLED; | ||
| 1661 | if (hash_enable) | ||
| 1662 | ftrace_hash_rec_enable(ops, 1); | ||
| 1663 | |||
| 1196 | ftrace_startup_enable(command); | 1664 | ftrace_startup_enable(command); |
| 1197 | } | 1665 | } |
| 1198 | 1666 | ||
| 1199 | static void ftrace_shutdown(int command) | 1667 | static void ftrace_shutdown(struct ftrace_ops *ops, int command) |
| 1200 | { | 1668 | { |
| 1669 | bool hash_disable = true; | ||
| 1670 | |||
| 1201 | if (unlikely(ftrace_disabled)) | 1671 | if (unlikely(ftrace_disabled)) |
| 1202 | return; | 1672 | return; |
| 1203 | 1673 | ||
| @@ -1209,6 +1679,23 @@ static void ftrace_shutdown(int command) | |||
| 1209 | */ | 1679 | */ |
| 1210 | WARN_ON_ONCE(ftrace_start_up < 0); | 1680 | WARN_ON_ONCE(ftrace_start_up < 0); |
| 1211 | 1681 | ||
| 1682 | if (ops->flags & FTRACE_OPS_FL_GLOBAL) { | ||
| 1683 | ops = &global_ops; | ||
| 1684 | global_start_up--; | ||
| 1685 | WARN_ON_ONCE(global_start_up < 0); | ||
| 1686 | /* Don't update hash if global still has users */ | ||
| 1687 | if (global_start_up) { | ||
| 1688 | WARN_ON_ONCE(!ftrace_start_up); | ||
| 1689 | hash_disable = false; | ||
| 1690 | } | ||
| 1691 | } | ||
| 1692 | |||
| 1693 | if (hash_disable) | ||
| 1694 | ftrace_hash_rec_disable(ops, 1); | ||
| 1695 | |||
| 1696 | if (ops != &global_ops || !global_start_up) | ||
| 1697 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; | ||
| 1698 | |||
| 1212 | if (!ftrace_start_up) | 1699 | if (!ftrace_start_up) |
| 1213 | command |= FTRACE_DISABLE_CALLS; | 1700 | command |= FTRACE_DISABLE_CALLS; |
| 1214 | 1701 | ||
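The global_start_up counter added here makes the shared-hash work idempotent for nested FTRACE_OPS_FL_GLOBAL users: only the first startup and the last shutdown actually touch the records. A toy model of that nesting (assumed simplification):

	#include <stdio.h>

	static int global_start_up;

	static void startup(void)
	{
		/* only the first global user touches the records */
		if (global_start_up++ == 0)
			printf("enable shared hash\n");
	}

	static void shutdown(void)
	{
		/* only the last global user undoes the work */
		if (--global_start_up == 0)
			printf("disable shared hash\n");
	}

	int main(void)
	{
		startup();	/* first user: enables */
		startup();	/* nested user: no hash work */
		shutdown();	/* one user remains: no hash work */
		shutdown();	/* last user: disables */
		return 0;
	}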
| @@ -1273,10 +1760,10 @@ static int ftrace_update_code(struct module *mod) | |||
| 1273 | */ | 1760 | */ |
| 1274 | if (!ftrace_code_disable(mod, p)) { | 1761 | if (!ftrace_code_disable(mod, p)) { |
| 1275 | ftrace_free_rec(p); | 1762 | ftrace_free_rec(p); |
| 1276 | continue; | 1763 | /* Game over */ |
| 1764 | break; | ||
| 1277 | } | 1765 | } |
| 1278 | 1766 | ||
| 1279 | p->flags |= FTRACE_FL_CONVERTED; | ||
| 1280 | ftrace_update_cnt++; | 1767 | ftrace_update_cnt++; |
| 1281 | 1768 | ||
| 1282 | /* | 1769 | /* |
| @@ -1351,9 +1838,9 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init) | |||
| 1351 | enum { | 1838 | enum { |
| 1352 | FTRACE_ITER_FILTER = (1 << 0), | 1839 | FTRACE_ITER_FILTER = (1 << 0), |
| 1353 | FTRACE_ITER_NOTRACE = (1 << 1), | 1840 | FTRACE_ITER_NOTRACE = (1 << 1), |
| 1354 | FTRACE_ITER_FAILURES = (1 << 2), | 1841 | FTRACE_ITER_PRINTALL = (1 << 2), |
| 1355 | FTRACE_ITER_PRINTALL = (1 << 3), | 1842 | FTRACE_ITER_HASH = (1 << 3), |
| 1356 | FTRACE_ITER_HASH = (1 << 4), | 1843 | FTRACE_ITER_ENABLED = (1 << 4), |
| 1357 | }; | 1844 | }; |
| 1358 | 1845 | ||
| 1359 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ | 1846 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ |
| @@ -1365,6 +1852,8 @@ struct ftrace_iterator { | |||
| 1365 | struct dyn_ftrace *func; | 1852 | struct dyn_ftrace *func; |
| 1366 | struct ftrace_func_probe *probe; | 1853 | struct ftrace_func_probe *probe; |
| 1367 | struct trace_parser parser; | 1854 | struct trace_parser parser; |
| 1855 | struct ftrace_hash *hash; | ||
| 1856 | struct ftrace_ops *ops; | ||
| 1368 | int hidx; | 1857 | int hidx; |
| 1369 | int idx; | 1858 | int idx; |
| 1370 | unsigned flags; | 1859 | unsigned flags; |
| @@ -1461,8 +1950,12 @@ static void * | |||
| 1461 | t_next(struct seq_file *m, void *v, loff_t *pos) | 1950 | t_next(struct seq_file *m, void *v, loff_t *pos) |
| 1462 | { | 1951 | { |
| 1463 | struct ftrace_iterator *iter = m->private; | 1952 | struct ftrace_iterator *iter = m->private; |
| 1953 | struct ftrace_ops *ops = &global_ops; | ||
| 1464 | struct dyn_ftrace *rec = NULL; | 1954 | struct dyn_ftrace *rec = NULL; |
| 1465 | 1955 | ||
| 1956 | if (unlikely(ftrace_disabled)) | ||
| 1957 | return NULL; | ||
| 1958 | |||
| 1466 | if (iter->flags & FTRACE_ITER_HASH) | 1959 | if (iter->flags & FTRACE_ITER_HASH) |
| 1467 | return t_hash_next(m, pos); | 1960 | return t_hash_next(m, pos); |
| 1468 | 1961 | ||
| @@ -1483,17 +1976,15 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
| 1483 | rec = &iter->pg->records[iter->idx++]; | 1976 | rec = &iter->pg->records[iter->idx++]; |
| 1484 | if ((rec->flags & FTRACE_FL_FREE) || | 1977 | if ((rec->flags & FTRACE_FL_FREE) || |
| 1485 | 1978 | ||
| 1486 | (!(iter->flags & FTRACE_ITER_FAILURES) && | ||
| 1487 | (rec->flags & FTRACE_FL_FAILED)) || | ||
| 1488 | |||
| 1489 | ((iter->flags & FTRACE_ITER_FAILURES) && | ||
| 1490 | !(rec->flags & FTRACE_FL_FAILED)) || | ||
| 1491 | |||
| 1492 | ((iter->flags & FTRACE_ITER_FILTER) && | 1979 | ((iter->flags & FTRACE_ITER_FILTER) && |
| 1493 | !(rec->flags & FTRACE_FL_FILTER)) || | 1980 | !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) || |
| 1494 | 1981 | ||
| 1495 | ((iter->flags & FTRACE_ITER_NOTRACE) && | 1982 | ((iter->flags & FTRACE_ITER_NOTRACE) && |
| 1496 | !(rec->flags & FTRACE_FL_NOTRACE))) { | 1983 | !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) || |
| 1984 | |||
| 1985 | ((iter->flags & FTRACE_ITER_ENABLED) && | ||
| 1986 | !(rec->flags & ~FTRACE_FL_MASK))) { | ||
| 1987 | |||
| 1497 | rec = NULL; | 1988 | rec = NULL; |
| 1498 | goto retry; | 1989 | goto retry; |
| 1499 | } | 1990 | } |
| @@ -1517,10 +2008,15 @@ static void reset_iter_read(struct ftrace_iterator *iter) | |||
| 1517 | static void *t_start(struct seq_file *m, loff_t *pos) | 2008 | static void *t_start(struct seq_file *m, loff_t *pos) |
| 1518 | { | 2009 | { |
| 1519 | struct ftrace_iterator *iter = m->private; | 2010 | struct ftrace_iterator *iter = m->private; |
| 2011 | struct ftrace_ops *ops = &global_ops; | ||
| 1520 | void *p = NULL; | 2012 | void *p = NULL; |
| 1521 | loff_t l; | 2013 | loff_t l; |
| 1522 | 2014 | ||
| 1523 | mutex_lock(&ftrace_lock); | 2015 | mutex_lock(&ftrace_lock); |
| 2016 | |||
| 2017 | if (unlikely(ftrace_disabled)) | ||
| 2018 | return NULL; | ||
| 2019 | |||
| 1524 | /* | 2020 | /* |
| 1525 | * If an lseek was done, then reset and start from beginning. | 2021 | * If an lseek was done, then reset and start from beginning. |
| 1526 | */ | 2022 | */ |
| @@ -1532,7 +2028,7 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
| 1532 | * off, we can shortcut and just print out that all | 2028 | * off, we can shortcut and just print out that all |
| 1533 | * functions are enabled. | 2029 | * functions are enabled. |
| 1534 | */ | 2030 | */ |
| 1535 | if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) { | 2031 | if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) { |
| 1536 | if (*pos > 0) | 2032 | if (*pos > 0) |
| 1537 | return t_hash_start(m, pos); | 2033 | return t_hash_start(m, pos); |
| 1538 | iter->flags |= FTRACE_ITER_PRINTALL; | 2034 | iter->flags |= FTRACE_ITER_PRINTALL; |
| @@ -1590,7 +2086,11 @@ static int t_show(struct seq_file *m, void *v) | |||
| 1590 | if (!rec) | 2086 | if (!rec) |
| 1591 | return 0; | 2087 | return 0; |
| 1592 | 2088 | ||
| 1593 | seq_printf(m, "%ps\n", (void *)rec->ip); | 2089 | seq_printf(m, "%ps", (void *)rec->ip); |
| 2090 | if (iter->flags & FTRACE_ITER_ENABLED) | ||
| 2091 | seq_printf(m, " (%ld)", | ||
| 2092 | rec->flags & ~FTRACE_FL_MASK); | ||
| 2093 | seq_printf(m, "\n"); | ||
| 1594 | 2094 | ||
| 1595 | return 0; | 2095 | return 0; |
| 1596 | } | 2096 | } |
| @@ -1630,44 +2130,46 @@ ftrace_avail_open(struct inode *inode, struct file *file) | |||
| 1630 | } | 2130 | } |
| 1631 | 2131 | ||
| 1632 | static int | 2132 | static int |
| 1633 | ftrace_failures_open(struct inode *inode, struct file *file) | 2133 | ftrace_enabled_open(struct inode *inode, struct file *file) |
| 1634 | { | 2134 | { |
| 1635 | int ret; | ||
| 1636 | struct seq_file *m; | ||
| 1637 | struct ftrace_iterator *iter; | 2135 | struct ftrace_iterator *iter; |
| 2136 | int ret; | ||
| 2137 | |||
| 2138 | if (unlikely(ftrace_disabled)) | ||
| 2139 | return -ENODEV; | ||
| 2140 | |||
| 2141 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | ||
| 2142 | if (!iter) | ||
| 2143 | return -ENOMEM; | ||
| 2144 | |||
| 2145 | iter->pg = ftrace_pages_start; | ||
| 2146 | iter->flags = FTRACE_ITER_ENABLED; | ||
| 1638 | 2147 | ||
| 1639 | ret = ftrace_avail_open(inode, file); | 2148 | ret = seq_open(file, &show_ftrace_seq_ops); |
| 1640 | if (!ret) { | 2149 | if (!ret) { |
| 1641 | m = file->private_data; | 2150 | struct seq_file *m = file->private_data; |
| 1642 | iter = m->private; | 2151 | |
| 1643 | iter->flags = FTRACE_ITER_FAILURES; | 2152 | m->private = iter; |
| 2153 | } else { | ||
| 2154 | kfree(iter); | ||
| 1644 | } | 2155 | } |
| 1645 | 2156 | ||
| 1646 | return ret; | 2157 | return ret; |
| 1647 | } | 2158 | } |
| 1648 | 2159 | ||
| 1649 | 2160 | static void ftrace_filter_reset(struct ftrace_hash *hash) | |
| 1650 | static void ftrace_filter_reset(int enable) | ||
| 1651 | { | 2161 | { |
| 1652 | struct ftrace_page *pg; | ||
| 1653 | struct dyn_ftrace *rec; | ||
| 1654 | unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | ||
| 1655 | |||
| 1656 | mutex_lock(&ftrace_lock); | 2162 | mutex_lock(&ftrace_lock); |
| 1657 | if (enable) | 2163 | ftrace_hash_clear(hash); |
| 1658 | ftrace_filtered = 0; | ||
| 1659 | do_for_each_ftrace_rec(pg, rec) { | ||
| 1660 | if (rec->flags & FTRACE_FL_FAILED) | ||
| 1661 | continue; | ||
| 1662 | rec->flags &= ~type; | ||
| 1663 | } while_for_each_ftrace_rec(); | ||
| 1664 | mutex_unlock(&ftrace_lock); | 2164 | mutex_unlock(&ftrace_lock); |
| 1665 | } | 2165 | } |
| 1666 | 2166 | ||
| 1667 | static int | 2167 | static int |
| 1668 | ftrace_regex_open(struct inode *inode, struct file *file, int enable) | 2168 | ftrace_regex_open(struct ftrace_ops *ops, int flag, |
| 2169 | struct inode *inode, struct file *file) | ||
| 1669 | { | 2170 | { |
| 1670 | struct ftrace_iterator *iter; | 2171 | struct ftrace_iterator *iter; |
| 2172 | struct ftrace_hash *hash; | ||
| 1671 | int ret = 0; | 2173 | int ret = 0; |
| 1672 | 2174 | ||
| 1673 | if (unlikely(ftrace_disabled)) | 2175 | if (unlikely(ftrace_disabled)) |
| @@ -1682,21 +2184,42 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable) | |||
| 1682 | return -ENOMEM; | 2184 | return -ENOMEM; |
| 1683 | } | 2185 | } |
| 1684 | 2186 | ||
| 2187 | if (flag & FTRACE_ITER_NOTRACE) | ||
| 2188 | hash = ops->notrace_hash; | ||
| 2189 | else | ||
| 2190 | hash = ops->filter_hash; | ||
| 2191 | |||
| 2192 | iter->ops = ops; | ||
| 2193 | iter->flags = flag; | ||
| 2194 | |||
| 2195 | if (file->f_mode & FMODE_WRITE) { | ||
| 2196 | mutex_lock(&ftrace_lock); | ||
| 2197 | iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash); | ||
| 2198 | mutex_unlock(&ftrace_lock); | ||
| 2199 | |||
| 2200 | if (!iter->hash) { | ||
| 2201 | trace_parser_put(&iter->parser); | ||
| 2202 | kfree(iter); | ||
| 2203 | return -ENOMEM; | ||
| 2204 | } | ||
| 2205 | } | ||
| 2206 | |||
| 1685 | mutex_lock(&ftrace_regex_lock); | 2207 | mutex_lock(&ftrace_regex_lock); |
| 2208 | |||
| 1686 | if ((file->f_mode & FMODE_WRITE) && | 2209 | if ((file->f_mode & FMODE_WRITE) && |
| 1687 | (file->f_flags & O_TRUNC)) | 2210 | (file->f_flags & O_TRUNC)) |
| 1688 | ftrace_filter_reset(enable); | 2211 | ftrace_filter_reset(iter->hash); |
| 1689 | 2212 | ||
| 1690 | if (file->f_mode & FMODE_READ) { | 2213 | if (file->f_mode & FMODE_READ) { |
| 1691 | iter->pg = ftrace_pages_start; | 2214 | iter->pg = ftrace_pages_start; |
| 1692 | iter->flags = enable ? FTRACE_ITER_FILTER : | ||
| 1693 | FTRACE_ITER_NOTRACE; | ||
| 1694 | 2215 | ||
| 1695 | ret = seq_open(file, &show_ftrace_seq_ops); | 2216 | ret = seq_open(file, &show_ftrace_seq_ops); |
| 1696 | if (!ret) { | 2217 | if (!ret) { |
| 1697 | struct seq_file *m = file->private_data; | 2218 | struct seq_file *m = file->private_data; |
| 1698 | m->private = iter; | 2219 | m->private = iter; |
| 1699 | } else { | 2220 | } else { |
| 2221 | /* Failed */ | ||
| 2222 | free_ftrace_hash(iter->hash); | ||
| 1700 | trace_parser_put(&iter->parser); | 2223 | trace_parser_put(&iter->parser); |
| 1701 | kfree(iter); | 2224 | kfree(iter); |
| 1702 | } | 2225 | } |
| @@ -1710,13 +2233,15 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable) | |||
| 1710 | static int | 2233 | static int |
| 1711 | ftrace_filter_open(struct inode *inode, struct file *file) | 2234 | ftrace_filter_open(struct inode *inode, struct file *file) |
| 1712 | { | 2235 | { |
| 1713 | return ftrace_regex_open(inode, file, 1); | 2236 | return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER, |
| 2237 | inode, file); | ||
| 1714 | } | 2238 | } |
| 1715 | 2239 | ||
| 1716 | static int | 2240 | static int |
| 1717 | ftrace_notrace_open(struct inode *inode, struct file *file) | 2241 | ftrace_notrace_open(struct inode *inode, struct file *file) |
| 1718 | { | 2242 | { |
| 1719 | return ftrace_regex_open(inode, file, 0); | 2243 | return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE, |
| 2244 | inode, file); | ||
| 1720 | } | 2245 | } |
| 1721 | 2246 | ||
| 1722 | static loff_t | 2247 | static loff_t |
| @@ -1761,86 +2286,99 @@ static int ftrace_match(char *str, char *regex, int len, int type) | |||
| 1761 | } | 2286 | } |
| 1762 | 2287 | ||
| 1763 | static int | 2288 | static int |
| 1764 | ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type) | 2289 | enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not) |
| 2290 | { | ||
| 2291 | struct ftrace_func_entry *entry; | ||
| 2292 | int ret = 0; | ||
| 2293 | |||
| 2294 | entry = ftrace_lookup_ip(hash, rec->ip); | ||
| 2295 | if (not) { | ||
| 2296 | /* Do nothing if it doesn't exist */ | ||
| 2297 | if (!entry) | ||
| 2298 | return 0; | ||
| 2299 | |||
| 2300 | free_hash_entry(hash, entry); | ||
| 2301 | } else { | ||
| 2302 | /* Do nothing if it exists */ | ||
| 2303 | if (entry) | ||
| 2304 | return 0; | ||
| 2305 | |||
| 2306 | ret = add_hash_entry(hash, rec->ip); | ||
| 2307 | } | ||
| 2308 | return ret; | ||
| 2309 | } | ||
| 2310 | |||
| 2311 | static int | ||
| 2312 | ftrace_match_record(struct dyn_ftrace *rec, char *mod, | ||
| 2313 | char *regex, int len, int type) | ||
| 1765 | { | 2314 | { |
| 1766 | char str[KSYM_SYMBOL_LEN]; | 2315 | char str[KSYM_SYMBOL_LEN]; |
| 2316 | char *modname; | ||
| 2317 | |||
| 2318 | kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); | ||
| 2319 | |||
| 2320 | if (mod) { | ||
| 2321 | /* module lookup requires matching the module */ | ||
| 2322 | if (!modname || strcmp(modname, mod)) | ||
| 2323 | return 0; | ||
| 2324 | |||
| 2325 | /* blank search means to match all funcs in the mod */ | ||
| 2326 | if (!len) | ||
| 2327 | return 1; | ||
| 2328 | } | ||
| 1767 | 2329 | ||
| 1768 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | ||
| 1769 | return ftrace_match(str, regex, len, type); | 2330 | return ftrace_match(str, regex, len, type); |
| 1770 | } | 2331 | } |
| 1771 | 2332 | ||
| 1772 | static int ftrace_match_records(char *buff, int len, int enable) | 2333 | static int |
| 2334 | match_records(struct ftrace_hash *hash, char *buff, | ||
| 2335 | int len, char *mod, int not) | ||
| 1773 | { | 2336 | { |
| 1774 | unsigned int search_len; | 2337 | unsigned search_len = 0; |
| 1775 | struct ftrace_page *pg; | 2338 | struct ftrace_page *pg; |
| 1776 | struct dyn_ftrace *rec; | 2339 | struct dyn_ftrace *rec; |
| 1777 | unsigned long flag; | 2340 | int type = MATCH_FULL; |
| 1778 | char *search; | 2341 | char *search = buff; |
| 1779 | int type; | ||
| 1780 | int not; | ||
| 1781 | int found = 0; | 2342 | int found = 0; |
| 2343 | int ret; | ||
| 1782 | 2344 | ||
| 1783 | flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | 2345 | if (len) { |
| 1784 | type = filter_parse_regex(buff, len, &search, ¬); | 2346 | type = filter_parse_regex(buff, len, &search, ¬); |
| 1785 | 2347 | search_len = strlen(search); | |
| 1786 | search_len = strlen(search); | 2348 | } |
| 1787 | 2349 | ||
| 1788 | mutex_lock(&ftrace_lock); | 2350 | mutex_lock(&ftrace_lock); |
| 1789 | do_for_each_ftrace_rec(pg, rec) { | ||
| 1790 | 2351 | ||
| 1791 | if (rec->flags & FTRACE_FL_FAILED) | 2352 | if (unlikely(ftrace_disabled)) |
| 1792 | continue; | 2353 | goto out_unlock; |
| 1793 | 2354 | ||
| 1794 | if (ftrace_match_record(rec, search, search_len, type)) { | 2355 | do_for_each_ftrace_rec(pg, rec) { |
| 1795 | if (not) | 2356 | |
| 1796 | rec->flags &= ~flag; | 2357 | if (ftrace_match_record(rec, mod, search, search_len, type)) { |
| 1797 | else | 2358 | ret = enter_record(hash, rec, not); |
| 1798 | rec->flags |= flag; | 2359 | if (ret < 0) { |
| 2360 | found = ret; | ||
| 2361 | goto out_unlock; | ||
| 2362 | } | ||
| 1799 | found = 1; | 2363 | found = 1; |
| 1800 | } | 2364 | } |
| 1801 | /* | ||
| 1802 | * Only enable filtering if we have a function that | ||
| 1803 | * is filtered on. | ||
| 1804 | */ | ||
| 1805 | if (enable && (rec->flags & FTRACE_FL_FILTER)) | ||
| 1806 | ftrace_filtered = 1; | ||
| 1807 | } while_for_each_ftrace_rec(); | 2365 | } while_for_each_ftrace_rec(); |
| 2366 | out_unlock: | ||
| 1808 | mutex_unlock(&ftrace_lock); | 2367 | mutex_unlock(&ftrace_lock); |
| 1809 | 2368 | ||
| 1810 | return found; | 2369 | return found; |
| 1811 | } | 2370 | } |
| 1812 | 2371 | ||
| 1813 | static int | 2372 | static int |
| 1814 | ftrace_match_module_record(struct dyn_ftrace *rec, char *mod, | 2373 | ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) |
| 1815 | char *regex, int len, int type) | ||
| 1816 | { | 2374 | { |
| 1817 | char str[KSYM_SYMBOL_LEN]; | 2375 | return match_records(hash, buff, len, NULL, 0); |
| 1818 | char *modname; | ||
| 1819 | |||
| 1820 | kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); | ||
| 1821 | |||
| 1822 | if (!modname || strcmp(modname, mod)) | ||
| 1823 | return 0; | ||
| 1824 | |||
| 1825 | /* blank search means to match all funcs in the mod */ | ||
| 1826 | if (len) | ||
| 1827 | return ftrace_match(str, regex, len, type); | ||
| 1828 | else | ||
| 1829 | return 1; | ||
| 1830 | } | 2376 | } |
| 1831 | 2377 | ||
| 1832 | static int ftrace_match_module_records(char *buff, char *mod, int enable) | 2378 | static int |
| 2379 | ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod) | ||
| 1833 | { | 2380 | { |
| 1834 | unsigned search_len = 0; | ||
| 1835 | struct ftrace_page *pg; | ||
| 1836 | struct dyn_ftrace *rec; | ||
| 1837 | int type = MATCH_FULL; | ||
| 1838 | char *search = buff; | ||
| 1839 | unsigned long flag; | ||
| 1840 | int not = 0; | 2381 | int not = 0; |
| 1841 | int found = 0; | ||
| 1842 | |||
| 1843 | flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | ||
| 1844 | 2382 | ||
| 1845 | /* blank or '*' mean the same */ | 2383 | /* blank or '*' mean the same */ |
| 1846 | if (strcmp(buff, "*") == 0) | 2384 | if (strcmp(buff, "*") == 0) |
| @@ -1852,32 +2390,7 @@ static int ftrace_match_module_records(char *buff, char *mod, int enable) | |||
| 1852 | not = 1; | 2390 | not = 1; |
| 1853 | } | 2391 | } |
| 1854 | 2392 | ||
| 1855 | if (strlen(buff)) { | 2393 | return match_records(hash, buff, strlen(buff), mod, not); |
| 1856 | type = filter_parse_regex(buff, strlen(buff), &search, ¬); | ||
| 1857 | search_len = strlen(search); | ||
| 1858 | } | ||
| 1859 | |||
| 1860 | mutex_lock(&ftrace_lock); | ||
| 1861 | do_for_each_ftrace_rec(pg, rec) { | ||
| 1862 | |||
| 1863 | if (rec->flags & FTRACE_FL_FAILED) | ||
| 1864 | continue; | ||
| 1865 | |||
| 1866 | if (ftrace_match_module_record(rec, mod, | ||
| 1867 | search, search_len, type)) { | ||
| 1868 | if (not) | ||
| 1869 | rec->flags &= ~flag; | ||
| 1870 | else | ||
| 1871 | rec->flags |= flag; | ||
| 1872 | found = 1; | ||
| 1873 | } | ||
| 1874 | if (enable && (rec->flags & FTRACE_FL_FILTER)) | ||
| 1875 | ftrace_filtered = 1; | ||
| 1876 | |||
| 1877 | } while_for_each_ftrace_rec(); | ||
| 1878 | mutex_unlock(&ftrace_lock); | ||
| 1879 | |||
| 1880 | return found; | ||
| 1881 | } | 2394 | } |
| 1882 | 2395 | ||
| 1883 | /* | 2396 | /* |
| @@ -1888,7 +2401,10 @@ static int ftrace_match_module_records(char *buff, char *mod, int enable) | |||
| 1888 | static int | 2401 | static int |
| 1889 | ftrace_mod_callback(char *func, char *cmd, char *param, int enable) | 2402 | ftrace_mod_callback(char *func, char *cmd, char *param, int enable) |
| 1890 | { | 2403 | { |
| 2404 | struct ftrace_ops *ops = &global_ops; | ||
| 2405 | struct ftrace_hash *hash; | ||
| 1891 | char *mod; | 2406 | char *mod; |
| 2407 | int ret = -EINVAL; | ||
| 1892 | 2408 | ||
| 1893 | /* | 2409 | /* |
| 1894 | * cmd == 'mod' because we only registered this func | 2410 | * cmd == 'mod' because we only registered this func |
| @@ -1900,15 +2416,24 @@ ftrace_mod_callback(char *func, char *cmd, char *param, int enable) | |||
| 1900 | 2416 | ||
| 1901 | /* we must have a module name */ | 2417 | /* we must have a module name */ |
| 1902 | if (!param) | 2418 | if (!param) |
| 1903 | return -EINVAL; | 2419 | return ret; |
| 1904 | 2420 | ||
| 1905 | mod = strsep(¶m, ":"); | 2421 | mod = strsep(¶m, ":"); |
| 1906 | if (!strlen(mod)) | 2422 | if (!strlen(mod)) |
| 1907 | return -EINVAL; | 2423 | return ret; |
| 1908 | 2424 | ||
| 1909 | if (ftrace_match_module_records(func, mod, enable)) | 2425 | if (enable) |
| 1910 | return 0; | 2426 | hash = ops->filter_hash; |
| 1911 | return -EINVAL; | 2427 | else |
| 2428 | hash = ops->notrace_hash; | ||
| 2429 | |||
| 2430 | ret = ftrace_match_module_records(hash, func, mod); | ||
| 2431 | if (!ret) | ||
| 2432 | ret = -EINVAL; | ||
| 2433 | if (ret < 0) | ||
| 2434 | return ret; | ||
| 2435 | |||
| 2436 | return 0; | ||
| 1912 | } | 2437 | } |
| 1913 | 2438 | ||
| 1914 | static struct ftrace_func_command ftrace_mod_cmd = { | 2439 | static struct ftrace_func_command ftrace_mod_cmd = { |
| @@ -1959,6 +2484,7 @@ static int ftrace_probe_registered; | |||
| 1959 | 2484 | ||
| 1960 | static void __enable_ftrace_function_probe(void) | 2485 | static void __enable_ftrace_function_probe(void) |
| 1961 | { | 2486 | { |
| 2487 | int ret; | ||
| 1962 | int i; | 2488 | int i; |
| 1963 | 2489 | ||
| 1964 | if (ftrace_probe_registered) | 2490 | if (ftrace_probe_registered) |
| @@ -1973,13 +2499,16 @@ static void __enable_ftrace_function_probe(void) | |||
| 1973 | if (i == FTRACE_FUNC_HASHSIZE) | 2499 | if (i == FTRACE_FUNC_HASHSIZE) |
| 1974 | return; | 2500 | return; |
| 1975 | 2501 | ||
| 1976 | __register_ftrace_function(&trace_probe_ops); | 2502 | ret = __register_ftrace_function(&trace_probe_ops); |
| 1977 | ftrace_startup(0); | 2503 | if (!ret) |
| 2504 | ftrace_startup(&trace_probe_ops, 0); | ||
| 2505 | |||
| 1978 | ftrace_probe_registered = 1; | 2506 | ftrace_probe_registered = 1; |
| 1979 | } | 2507 | } |
| 1980 | 2508 | ||
| 1981 | static void __disable_ftrace_function_probe(void) | 2509 | static void __disable_ftrace_function_probe(void) |
| 1982 | { | 2510 | { |
| 2511 | int ret; | ||
| 1983 | int i; | 2512 | int i; |
| 1984 | 2513 | ||
| 1985 | if (!ftrace_probe_registered) | 2514 | if (!ftrace_probe_registered) |
| @@ -1992,8 +2521,10 @@ static void __disable_ftrace_function_probe(void) | |||
| 1992 | } | 2521 | } |
| 1993 | 2522 | ||
| 1994 | /* no more funcs left */ | 2523 | /* no more funcs left */ |
| 1995 | __unregister_ftrace_function(&trace_probe_ops); | 2524 | ret = __unregister_ftrace_function(&trace_probe_ops); |
| 1996 | ftrace_shutdown(0); | 2525 | if (!ret) |
| 2526 | ftrace_shutdown(&trace_probe_ops, 0); | ||
| 2527 | |||
| 1997 | ftrace_probe_registered = 0; | 2528 | ftrace_probe_registered = 0; |
| 1998 | } | 2529 | } |
| 1999 | 2530 | ||
| @@ -2029,12 +2560,13 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 2029 | return -EINVAL; | 2560 | return -EINVAL; |
| 2030 | 2561 | ||
| 2031 | mutex_lock(&ftrace_lock); | 2562 | mutex_lock(&ftrace_lock); |
| 2032 | do_for_each_ftrace_rec(pg, rec) { | ||
| 2033 | 2563 | ||
| 2034 | if (rec->flags & FTRACE_FL_FAILED) | 2564 | if (unlikely(ftrace_disabled)) |
| 2035 | continue; | 2565 | goto out_unlock; |
| 2566 | |||
| 2567 | do_for_each_ftrace_rec(pg, rec) { | ||
| 2036 | 2568 | ||
| 2037 | if (!ftrace_match_record(rec, search, len, type)) | 2569 | if (!ftrace_match_record(rec, NULL, search, len, type)) |
| 2038 | continue; | 2570 | continue; |
| 2039 | 2571 | ||
| 2040 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | 2572 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); |
| @@ -2195,18 +2727,22 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd) | |||
| 2195 | return ret; | 2727 | return ret; |
| 2196 | } | 2728 | } |
| 2197 | 2729 | ||
| 2198 | static int ftrace_process_regex(char *buff, int len, int enable) | 2730 | static int ftrace_process_regex(struct ftrace_hash *hash, |
| 2731 | char *buff, int len, int enable) | ||
| 2199 | { | 2732 | { |
| 2200 | char *func, *command, *next = buff; | 2733 | char *func, *command, *next = buff; |
| 2201 | struct ftrace_func_command *p; | 2734 | struct ftrace_func_command *p; |
| 2202 | int ret = -EINVAL; | 2735 | int ret; |
| 2203 | 2736 | ||
| 2204 | func = strsep(&next, ":"); | 2737 | func = strsep(&next, ":"); |
| 2205 | 2738 | ||
| 2206 | if (!next) { | 2739 | if (!next) { |
| 2207 | if (ftrace_match_records(func, len, enable)) | 2740 | ret = ftrace_match_records(hash, func, len); |
| 2208 | return 0; | 2741 | if (!ret) |
| 2209 | return ret; | 2742 | ret = -EINVAL; |
| 2743 | if (ret < 0) | ||
| 2744 | return ret; | ||
| 2745 | return 0; | ||
| 2210 | } | 2746 | } |
| 2211 | 2747 | ||
| 2212 | /* command found */ | 2748 | /* command found */ |
| @@ -2239,6 +2775,10 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, | |||
| 2239 | 2775 | ||
| 2240 | mutex_lock(&ftrace_regex_lock); | 2776 | mutex_lock(&ftrace_regex_lock); |
| 2241 | 2777 | ||
| 2778 | ret = -ENODEV; | ||
| 2779 | if (unlikely(ftrace_disabled)) | ||
| 2780 | goto out_unlock; | ||
| 2781 | |||
| 2242 | if (file->f_mode & FMODE_READ) { | 2782 | if (file->f_mode & FMODE_READ) { |
| 2243 | struct seq_file *m = file->private_data; | 2783 | struct seq_file *m = file->private_data; |
| 2244 | iter = m->private; | 2784 | iter = m->private; |
| @@ -2250,7 +2790,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, | |||
| 2250 | 2790 | ||
| 2251 | if (read >= 0 && trace_parser_loaded(parser) && | 2791 | if (read >= 0 && trace_parser_loaded(parser) && |
| 2252 | !trace_parser_cont(parser)) { | 2792 | !trace_parser_cont(parser)) { |
| 2253 | ret = ftrace_process_regex(parser->buffer, | 2793 | ret = ftrace_process_regex(iter->hash, parser->buffer, |
| 2254 | parser->idx, enable); | 2794 | parser->idx, enable); |
| 2255 | trace_parser_clear(parser); | 2795 | trace_parser_clear(parser); |
| 2256 | if (ret) | 2796 | if (ret) |
| @@ -2278,22 +2818,49 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf, | |||
| 2278 | return ftrace_regex_write(file, ubuf, cnt, ppos, 0); | 2818 | return ftrace_regex_write(file, ubuf, cnt, ppos, 0); |
| 2279 | } | 2819 | } |
| 2280 | 2820 | ||
| 2281 | static void | 2821 | static int |
| 2282 | ftrace_set_regex(unsigned char *buf, int len, int reset, int enable) | 2822 | ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, |
| 2823 | int reset, int enable) | ||
| 2283 | { | 2824 | { |
| 2825 | struct ftrace_hash **orig_hash; | ||
| 2826 | struct ftrace_hash *hash; | ||
| 2827 | int ret; | ||
| 2828 | |||
| 2829 | /* All global ops uses the global ops filters */ | ||
| 2830 | if (ops->flags & FTRACE_OPS_FL_GLOBAL) | ||
| 2831 | ops = &global_ops; | ||
| 2832 | |||
| 2284 | if (unlikely(ftrace_disabled)) | 2833 | if (unlikely(ftrace_disabled)) |
| 2285 | return; | 2834 | return -ENODEV; |
| 2835 | |||
| 2836 | if (enable) | ||
| 2837 | orig_hash = &ops->filter_hash; | ||
| 2838 | else | ||
| 2839 | orig_hash = &ops->notrace_hash; | ||
| 2840 | |||
| 2841 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); | ||
| 2842 | if (!hash) | ||
| 2843 | return -ENOMEM; | ||
| 2286 | 2844 | ||
| 2287 | mutex_lock(&ftrace_regex_lock); | 2845 | mutex_lock(&ftrace_regex_lock); |
| 2288 | if (reset) | 2846 | if (reset) |
| 2289 | ftrace_filter_reset(enable); | 2847 | ftrace_filter_reset(hash); |
| 2290 | if (buf) | 2848 | if (buf) |
| 2291 | ftrace_match_records(buf, len, enable); | 2849 | ftrace_match_records(hash, buf, len); |
| 2850 | |||
| 2851 | mutex_lock(&ftrace_lock); | ||
| 2852 | ret = ftrace_hash_move(orig_hash, hash); | ||
| 2853 | mutex_unlock(&ftrace_lock); | ||
| 2854 | |||
| 2292 | mutex_unlock(&ftrace_regex_lock); | 2855 | mutex_unlock(&ftrace_regex_lock); |
| 2856 | |||
| 2857 | free_ftrace_hash(hash); | ||
| 2858 | return ret; | ||
| 2293 | } | 2859 | } |
| 2294 | 2860 | ||
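Note that ftrace_set_regex() never edits the live hash: it copies, mutates the copy under ftrace_regex_lock, then publishes it with a single pointer store and defers the free. A single-threaded userspace sketch of that pattern (rcu_assign_pointer() and call_rcu_sched() degrade to a plain store and free here):

	#include <stdio.h>
	#include <stdlib.h>

	struct hash { int count; };

	static struct hash *live;	/* what readers dereference */

	static int update(int new_count)
	{
		struct hash *new_hash, *old;

		new_hash = malloc(sizeof(*new_hash));
		if (!new_hash)
			return -1;
		new_hash->count = new_count;	/* mutate the copy only */

		old = live;
		live = new_hash;	/* kernel: rcu_assign_pointer() */
		free(old);		/* kernel: deferred via call_rcu_sched() */
		return 0;
	}

	int main(void)
	{
		update(1);
		update(5);
		printf("live count = %d\n", live->count);
		free(live);
		return 0;
	}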
| 2295 | /** | 2861 | /** |
| 2296 | * ftrace_set_filter - set a function to filter on in ftrace | 2862 | * ftrace_set_filter - set a function to filter on in ftrace |
| 2863 | * @ops - the ops to set the filter with | ||
| 2297 | * @buf - the string that holds the function filter text. | 2864 | * @buf - the string that holds the function filter text. |
| 2298 | * @len - the length of the string. | 2865 | * @len - the length of the string. |
| 2299 | * @reset - non zero to reset all filters before applying this filter. | 2866 | * @reset - non zero to reset all filters before applying this filter. |
| @@ -2301,13 +2868,16 @@ ftrace_set_regex(unsigned char *buf, int len, int reset, int enable) | |||
| 2301 | * Filters denote which functions should be enabled when tracing is enabled. | 2868 | * Filters denote which functions should be enabled when tracing is enabled. |
| 2302 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. | 2869 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. |
| 2303 | */ | 2870 | */ |
| 2304 | void ftrace_set_filter(unsigned char *buf, int len, int reset) | 2871 | void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, |
| 2872 | int len, int reset) | ||
| 2305 | { | 2873 | { |
| 2306 | ftrace_set_regex(buf, len, reset, 1); | 2874 | ftrace_set_regex(ops, buf, len, reset, 1); |
| 2307 | } | 2875 | } |
| 2876 | EXPORT_SYMBOL_GPL(ftrace_set_filter); | ||
| 2308 | 2877 | ||
| 2309 | /** | 2878 | /** |
| 2310 | * ftrace_set_notrace - set a function to not trace in ftrace | 2879 | * ftrace_set_notrace - set a function to not trace in ftrace |
| 2880 | * @ops - the ops to set the notrace filter with | ||
| 2311 | * @buf - the string that holds the function notrace text. | 2881 | * @buf - the string that holds the function notrace text. |
| 2312 | * @len - the length of the string. | 2882 | * @len - the length of the string. |
| 2313 | * @reset - non zero to reset all filters before applying this filter. | 2883 | * @reset - non zero to reset all filters before applying this filter. |
| @@ -2316,10 +2886,44 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset) | |||
| 2316 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled | 2886 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled |
| 2317 | * for tracing. | 2887 | * for tracing. |
| 2318 | */ | 2888 | */ |
| 2319 | void ftrace_set_notrace(unsigned char *buf, int len, int reset) | 2889 | void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, |
| 2890 | int len, int reset) | ||
| 2320 | { | 2891 | { |
| 2321 | ftrace_set_regex(buf, len, reset, 0); | 2892 | ftrace_set_regex(ops, buf, len, reset, 0); |
| 2322 | } | 2893 | } |
| 2894 | EXPORT_SYMBOL_GPL(ftrace_set_notrace); | ||
| 2895 | /** | ||
| 2896 | * ftrace_set_global_filter - set a function to filter on with the global ops | ||
| 2897 | * | ||
| 2898 | * @buf - the string that holds the function filter text. | ||
| 2899 | * @len - the length of the string. | ||
| 2900 | * @reset - non zero to reset all filters before applying this filter. | ||
| 2901 | * | ||
| 2902 | * Filters denote which functions should be enabled when tracing is enabled. | ||
| 2903 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. | ||
| 2904 | */ | ||
| 2905 | void ftrace_set_global_filter(unsigned char *buf, int len, int reset) | ||
| 2906 | { | ||
| 2907 | ftrace_set_regex(&global_ops, buf, len, reset, 1); | ||
| 2908 | } | ||
| 2909 | EXPORT_SYMBOL_GPL(ftrace_set_global_filter); | ||
| 2910 | |||
| 2911 | /** | ||
| 2912 | * ftrace_set_global_notrace - set a function to not trace with the global ops | ||
| 2913 | * | ||
| 2914 | * @buf - the string that holds the function notrace text. | ||
| 2915 | * @len - the length of the string. | ||
| 2916 | * @reset - non zero to reset all filters before applying this filter. | ||
| 2917 | * | ||
| 2918 | * Notrace Filters denote which functions should not be enabled when tracing | ||
| 2919 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled | ||
| 2920 | * for tracing. | ||
| 2921 | */ | ||
| 2922 | void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) | ||
| 2923 | { | ||
| 2924 | ftrace_set_regex(&global_ops, buf, len, reset, 0); | ||
| 2925 | } | ||
| 2926 | EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); | ||
| 2323 | 2927 | ||
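With filters now per-ops, a caller can scope its callback before registering. A hedged module-style sketch follows (my_ops, my_func, and the filter string are made up for illustration; the calls used are the ones exported above plus register/unregister_ftrace_function()):

	#include <linux/ftrace.h>
	#include <linux/module.h>

	static void my_func(unsigned long ip, unsigned long parent_ip)
	{
		/* runs only for functions selected by my_ops's hashes */
	}

	static struct ftrace_ops my_ops = {
		.func = my_func,
	};

	static int __init my_init(void)
	{
		unsigned char buf[] = "schedule";

		/* reset old filters, then trace only schedule() */
		ftrace_set_filter(&my_ops, buf, sizeof(buf) - 1, 1);
		return register_ftrace_function(&my_ops);
	}

	static void __exit my_exit(void)
	{
		unregister_ftrace_function(&my_ops);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");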
| 2324 | /* | 2928 | /* |
| 2325 | * command line interface to allow users to set filters on boot up. | 2929 | * command line interface to allow users to set filters on boot up. |
| @@ -2370,22 +2974,23 @@ static void __init set_ftrace_early_graph(char *buf) | |||
| 2370 | } | 2974 | } |
| 2371 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 2975 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
| 2372 | 2976 | ||
| 2373 | static void __init set_ftrace_early_filter(char *buf, int enable) | 2977 | static void __init |
| 2978 | set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable) | ||
| 2374 | { | 2979 | { |
| 2375 | char *func; | 2980 | char *func; |
| 2376 | 2981 | ||
| 2377 | while (buf) { | 2982 | while (buf) { |
| 2378 | func = strsep(&buf, ","); | 2983 | func = strsep(&buf, ","); |
| 2379 | ftrace_set_regex(func, strlen(func), 0, enable); | 2984 | ftrace_set_regex(ops, func, strlen(func), 0, enable); |
| 2380 | } | 2985 | } |
| 2381 | } | 2986 | } |
| 2382 | 2987 | ||
| 2383 | static void __init set_ftrace_early_filters(void) | 2988 | static void __init set_ftrace_early_filters(void) |
| 2384 | { | 2989 | { |
| 2385 | if (ftrace_filter_buf[0]) | 2990 | if (ftrace_filter_buf[0]) |
| 2386 | set_ftrace_early_filter(ftrace_filter_buf, 1); | 2991 | set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1); |
| 2387 | if (ftrace_notrace_buf[0]) | 2992 | if (ftrace_notrace_buf[0]) |
| 2388 | set_ftrace_early_filter(ftrace_notrace_buf, 0); | 2993 | set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0); |
| 2389 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 2994 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 2390 | if (ftrace_graph_buf[0]) | 2995 | if (ftrace_graph_buf[0]) |
| 2391 | set_ftrace_early_graph(ftrace_graph_buf); | 2996 | set_ftrace_early_graph(ftrace_graph_buf); |
| @@ -2393,11 +2998,14 @@ static void __init set_ftrace_early_filters(void) | |||
| 2393 | } | 2998 | } |
| 2394 | 2999 | ||
| 2395 | static int | 3000 | static int |
| 2396 | ftrace_regex_release(struct inode *inode, struct file *file, int enable) | 3001 | ftrace_regex_release(struct inode *inode, struct file *file) |
| 2397 | { | 3002 | { |
| 2398 | struct seq_file *m = (struct seq_file *)file->private_data; | 3003 | struct seq_file *m = (struct seq_file *)file->private_data; |
| 2399 | struct ftrace_iterator *iter; | 3004 | struct ftrace_iterator *iter; |
| 3005 | struct ftrace_hash **orig_hash; | ||
| 2400 | struct trace_parser *parser; | 3006 | struct trace_parser *parser; |
| 3007 | int filter_hash; | ||
| 3008 | int ret; | ||
| 2401 | 3009 | ||
| 2402 | mutex_lock(&ftrace_regex_lock); | 3010 | mutex_lock(&ftrace_regex_lock); |
| 2403 | if (file->f_mode & FMODE_READ) { | 3011 | if (file->f_mode & FMODE_READ) { |
| @@ -2410,33 +3018,41 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable) | |||
| 2410 | parser = &iter->parser; | 3018 | parser = &iter->parser; |
| 2411 | if (trace_parser_loaded(parser)) { | 3019 | if (trace_parser_loaded(parser)) { |
| 2412 | parser->buffer[parser->idx] = 0; | 3020 | parser->buffer[parser->idx] = 0; |
| 2413 | ftrace_match_records(parser->buffer, parser->idx, enable); | 3021 | ftrace_match_records(iter->hash, parser->buffer, parser->idx); |
| 2414 | } | 3022 | } |
| 2415 | 3023 | ||
| 2416 | mutex_lock(&ftrace_lock); | ||
| 2417 | if (ftrace_start_up && ftrace_enabled) | ||
| 2418 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); | ||
| 2419 | mutex_unlock(&ftrace_lock); | ||
| 2420 | |||
| 2421 | trace_parser_put(parser); | 3024 | trace_parser_put(parser); |
| 3025 | |||
| 3026 | if (file->f_mode & FMODE_WRITE) { | ||
| 3027 | filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); | ||
| 3028 | |||
| 3029 | if (filter_hash) | ||
| 3030 | orig_hash = &iter->ops->filter_hash; | ||
| 3031 | else | ||
| 3032 | orig_hash = &iter->ops->notrace_hash; | ||
| 3033 | |||
| 3034 | mutex_lock(&ftrace_lock); | ||
| 3035 | /* | ||
| 3036 | * Remove the current set, update the hash and add | ||
| 3037 | * them back. | ||
| 3038 | */ | ||
| 3039 | ftrace_hash_rec_disable(iter->ops, filter_hash); | ||
| 3040 | ret = ftrace_hash_move(orig_hash, iter->hash); | ||
| 3041 | if (!ret) { | ||
| 3042 | ftrace_hash_rec_enable(iter->ops, filter_hash); | ||
| 3043 | if (iter->ops->flags & FTRACE_OPS_FL_ENABLED | ||
| 3044 | && ftrace_enabled) | ||
| 3045 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); | ||
| 3046 | } | ||
| 3047 | mutex_unlock(&ftrace_lock); | ||
| 3048 | } | ||
| 3049 | free_ftrace_hash(iter->hash); | ||
| 2422 | kfree(iter); | 3050 | kfree(iter); |
| 2423 | 3051 | ||
| 2424 | mutex_unlock(&ftrace_regex_lock); | 3052 | mutex_unlock(&ftrace_regex_lock); |
| 2425 | return 0; | 3053 | return 0; |
| 2426 | } | 3054 | } |
| 2427 | 3055 | ||
| 2428 | static int | ||
| 2429 | ftrace_filter_release(struct inode *inode, struct file *file) | ||
| 2430 | { | ||
| 2431 | return ftrace_regex_release(inode, file, 1); | ||
| 2432 | } | ||
| 2433 | |||
| 2434 | static int | ||
| 2435 | ftrace_notrace_release(struct inode *inode, struct file *file) | ||
| 2436 | { | ||
| 2437 | return ftrace_regex_release(inode, file, 0); | ||
| 2438 | } | ||
| 2439 | |||
| 2440 | static const struct file_operations ftrace_avail_fops = { | 3056 | static const struct file_operations ftrace_avail_fops = { |
| 2441 | .open = ftrace_avail_open, | 3057 | .open = ftrace_avail_open, |
| 2442 | .read = seq_read, | 3058 | .read = seq_read, |
| @@ -2444,8 +3060,8 @@ static const struct file_operations ftrace_avail_fops = { | |||
| 2444 | .release = seq_release_private, | 3060 | .release = seq_release_private, |
| 2445 | }; | 3061 | }; |
| 2446 | 3062 | ||
| 2447 | static const struct file_operations ftrace_failures_fops = { | 3063 | static const struct file_operations ftrace_enabled_fops = { |
| 2448 | .open = ftrace_failures_open, | 3064 | .open = ftrace_enabled_open, |
| 2449 | .read = seq_read, | 3065 | .read = seq_read, |
| 2450 | .llseek = seq_lseek, | 3066 | .llseek = seq_lseek, |
| 2451 | .release = seq_release_private, | 3067 | .release = seq_release_private, |
| @@ -2456,7 +3072,7 @@ static const struct file_operations ftrace_filter_fops = { | |||
| 2456 | .read = seq_read, | 3072 | .read = seq_read, |
| 2457 | .write = ftrace_filter_write, | 3073 | .write = ftrace_filter_write, |
| 2458 | .llseek = ftrace_regex_lseek, | 3074 | .llseek = ftrace_regex_lseek, |
| 2459 | .release = ftrace_filter_release, | 3075 | .release = ftrace_regex_release, |
| 2460 | }; | 3076 | }; |
| 2461 | 3077 | ||
| 2462 | static const struct file_operations ftrace_notrace_fops = { | 3078 | static const struct file_operations ftrace_notrace_fops = { |
| @@ -2464,7 +3080,7 @@ static const struct file_operations ftrace_notrace_fops = { | |||
| 2464 | .read = seq_read, | 3080 | .read = seq_read, |
| 2465 | .write = ftrace_notrace_write, | 3081 | .write = ftrace_notrace_write, |
| 2466 | .llseek = ftrace_regex_lseek, | 3082 | .llseek = ftrace_regex_lseek, |
| 2467 | .release = ftrace_notrace_release, | 3083 | .release = ftrace_regex_release, |
| 2468 | }; | 3084 | }; |
| 2469 | 3085 | ||
| 2470 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 3086 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| @@ -2573,9 +3189,6 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer) | |||
| 2573 | bool exists; | 3189 | bool exists; |
| 2574 | int i; | 3190 | int i; |
| 2575 | 3191 | ||
| 2576 | if (ftrace_disabled) | ||
| 2577 | return -ENODEV; | ||
| 2578 | |||
| 2579 | /* decode regex */ | 3192 | /* decode regex */ |
| 2580 | type = filter_parse_regex(buffer, strlen(buffer), &search, ¬); | 3193 | type = filter_parse_regex(buffer, strlen(buffer), &search, ¬); |
| 2581 | if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS) | 3194 | if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS) |
| @@ -2584,12 +3197,18 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer) | |||
| 2584 | search_len = strlen(search); | 3197 | search_len = strlen(search); |
| 2585 | 3198 | ||
| 2586 | mutex_lock(&ftrace_lock); | 3199 | mutex_lock(&ftrace_lock); |
| 3200 | |||
| 3201 | if (unlikely(ftrace_disabled)) { | ||
| 3202 | mutex_unlock(&ftrace_lock); | ||
| 3203 | return -ENODEV; | ||
| 3204 | } | ||
| 3205 | |||
| 2587 | do_for_each_ftrace_rec(pg, rec) { | 3206 | do_for_each_ftrace_rec(pg, rec) { |
| 2588 | 3207 | ||
| 2589 | if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) | 3208 | if (rec->flags & FTRACE_FL_FREE) |
| 2590 | continue; | 3209 | continue; |
| 2591 | 3210 | ||
| 2592 | if (ftrace_match_record(rec, search, search_len, type)) { | 3211 | if (ftrace_match_record(rec, NULL, search, search_len, type)) { |
| 2593 | /* if it is in the array */ | 3212 | /* if it is in the array */ |
| 2594 | exists = false; | 3213 | exists = false; |
| 2595 | for (i = 0; i < *idx; i++) { | 3214 | for (i = 0; i < *idx; i++) { |
| @@ -2679,8 +3298,8 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) | |||
| 2679 | trace_create_file("available_filter_functions", 0444, | 3298 | trace_create_file("available_filter_functions", 0444, |
| 2680 | d_tracer, NULL, &ftrace_avail_fops); | 3299 | d_tracer, NULL, &ftrace_avail_fops); |
| 2681 | 3300 | ||
| 2682 | trace_create_file("failures", 0444, | 3301 | trace_create_file("enabled_functions", 0444, |
| 2683 | d_tracer, NULL, &ftrace_failures_fops); | 3302 | d_tracer, NULL, &ftrace_enabled_fops); |
| 2684 | 3303 | ||
| 2685 | trace_create_file("set_ftrace_filter", 0644, d_tracer, | 3304 | trace_create_file("set_ftrace_filter", 0644, d_tracer, |
| 2686 | NULL, &ftrace_filter_fops); | 3305 | NULL, &ftrace_filter_fops); |
| @@ -2703,7 +3322,6 @@ static int ftrace_process_locs(struct module *mod, | |||
| 2703 | { | 3322 | { |
| 2704 | unsigned long *p; | 3323 | unsigned long *p; |
| 2705 | unsigned long addr; | 3324 | unsigned long addr; |
| 2706 | unsigned long flags; | ||
| 2707 | 3325 | ||
| 2708 | mutex_lock(&ftrace_lock); | 3326 | mutex_lock(&ftrace_lock); |
| 2709 | p = start; | 3327 | p = start; |
| @@ -2720,10 +3338,7 @@ static int ftrace_process_locs(struct module *mod, | |||
| 2720 | ftrace_record_ip(addr); | 3338 | ftrace_record_ip(addr); |
| 2721 | } | 3339 | } |
| 2722 | 3340 | ||
| 2723 | /* disable interrupts to prevent kstop machine */ | ||
| 2724 | local_irq_save(flags); | ||
| 2725 | ftrace_update_code(mod); | 3341 | ftrace_update_code(mod); |
| 2726 | local_irq_restore(flags); | ||
| 2727 | mutex_unlock(&ftrace_lock); | 3342 | mutex_unlock(&ftrace_lock); |
| 2728 | 3343 | ||
| 2729 | return 0; | 3344 | return 0; |
| @@ -2735,10 +3350,11 @@ void ftrace_release_mod(struct module *mod) | |||
| 2735 | struct dyn_ftrace *rec; | 3350 | struct dyn_ftrace *rec; |
| 2736 | struct ftrace_page *pg; | 3351 | struct ftrace_page *pg; |
| 2737 | 3352 | ||
| 3353 | mutex_lock(&ftrace_lock); | ||
| 3354 | |||
| 2738 | if (ftrace_disabled) | 3355 | if (ftrace_disabled) |
| 2739 | return; | 3356 | goto out_unlock; |
| 2740 | 3357 | ||
| 2741 | mutex_lock(&ftrace_lock); | ||
| 2742 | do_for_each_ftrace_rec(pg, rec) { | 3358 | do_for_each_ftrace_rec(pg, rec) { |
| 2743 | if (within_module_core(rec->ip, mod)) { | 3359 | if (within_module_core(rec->ip, mod)) { |
| 2744 | /* | 3360 | /* |
| @@ -2749,6 +3365,7 @@ void ftrace_release_mod(struct module *mod) | |||
| 2749 | ftrace_free_rec(rec); | 3365 | ftrace_free_rec(rec); |
| 2750 | } | 3366 | } |
| 2751 | } while_for_each_ftrace_rec(); | 3367 | } while_for_each_ftrace_rec(); |
| 3368 | out_unlock: | ||
| 2752 | mutex_unlock(&ftrace_lock); | 3369 | mutex_unlock(&ftrace_lock); |
| 2753 | } | 3370 | } |
| 2754 | 3371 | ||
| @@ -2835,6 +3452,10 @@ void __init ftrace_init(void) | |||
| 2835 | 3452 | ||
| 2836 | #else | 3453 | #else |
| 2837 | 3454 | ||
| 3455 | static struct ftrace_ops global_ops = { | ||
| 3456 | .func = ftrace_stub, | ||
| 3457 | }; | ||
| 3458 | |||
| 2838 | static int __init ftrace_nodyn_init(void) | 3459 | static int __init ftrace_nodyn_init(void) |
| 2839 | { | 3460 | { |
| 2840 | ftrace_enabled = 1; | 3461 | ftrace_enabled = 1; |
| @@ -2845,12 +3466,38 @@ device_initcall(ftrace_nodyn_init); | |||
| 2845 | static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } | 3466 | static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } |
| 2846 | static inline void ftrace_startup_enable(int command) { } | 3467 | static inline void ftrace_startup_enable(int command) { } |
| 2847 | /* Keep as macros so we do not need to define the commands */ | 3468 | /* Keep as macros so we do not need to define the commands */ |
| 2848 | # define ftrace_startup(command) do { } while (0) | 3469 | # define ftrace_startup(ops, command) do { } while (0) |
| 2849 | # define ftrace_shutdown(command) do { } while (0) | 3470 | # define ftrace_shutdown(ops, command) do { } while (0) |
| 2850 | # define ftrace_startup_sysctl() do { } while (0) | 3471 | # define ftrace_startup_sysctl() do { } while (0) |
| 2851 | # define ftrace_shutdown_sysctl() do { } while (0) | 3472 | # define ftrace_shutdown_sysctl() do { } while (0) |
| 3473 | |||
| 3474 | static inline int | ||
| 3475 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) | ||
| 3476 | { | ||
| 3477 | return 1; | ||
| 3478 | } | ||
| 3479 | |||
| 2852 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 3480 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
| 2853 | 3481 | ||
| 3482 | static void | ||
| 3483 | ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) | ||
| 3484 | { | ||
| 3485 | struct ftrace_ops *op; | ||
| 3486 | |||
| 3487 | /* | ||
| 3488 | * Some of the ops may be dynamically allocated, | ||
| 3489 | * they must be freed after a synchronize_sched(). | ||
| 3490 | */ | ||
| 3491 | preempt_disable_notrace(); | ||
| 3492 | op = rcu_dereference_raw(ftrace_ops_list); | ||
| 3493 | while (op != &ftrace_list_end) { | ||
| 3494 | if (ftrace_ops_test(op, ip)) | ||
| 3495 | op->func(ip, parent_ip); | ||
| 3496 | op = rcu_dereference_raw(op->next); | ||
| 3497 | }; | ||
| 3498 | preempt_enable_notrace(); | ||
| 3499 | } | ||
| 3500 | |||
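The new `ftrace_ops_list_func()` is the dispatcher that lets several independent ops coexist: every registered ops whose filter matches the instruction pointer gets its callback. A stripped, compilable userspace sketch of the walk (the preempt/RCU guards are elided and the hash test is a stand-in):

```c
struct ftrace_ops {
	void (*func)(unsigned long ip, unsigned long parent_ip);
	struct ftrace_ops *next;
};

static void ftrace_stub(unsigned long ip, unsigned long parent_ip) { }

static struct ftrace_ops ftrace_list_end = { .func = ftrace_stub };
static struct ftrace_ops *ftrace_ops_list = &ftrace_list_end;

/* stand-in: the real test consults the ops' filter/notrace hashes */
static int ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
{
	return 1;
}

static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op;

	/* kernel version walks under preempt_disable + rcu_dereference */
	for (op = ftrace_ops_list; op != &ftrace_list_end; op = op->next)
		if (ftrace_ops_test(op, ip))
			op->func(ip, parent_ip);
}
```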
| 2854 | static void clear_ftrace_swapper(void) | 3501 | static void clear_ftrace_swapper(void) |
| 2855 | { | 3502 | { |
| 2856 | struct task_struct *p; | 3503 | struct task_struct *p; |
| @@ -3143,19 +3790,23 @@ void ftrace_kill(void) | |||
| 3143 | */ | 3790 | */ |
| 3144 | int register_ftrace_function(struct ftrace_ops *ops) | 3791 | int register_ftrace_function(struct ftrace_ops *ops) |
| 3145 | { | 3792 | { |
| 3146 | int ret; | 3793 | int ret = -1; |
| 3147 | |||
| 3148 | if (unlikely(ftrace_disabled)) | ||
| 3149 | return -1; | ||
| 3150 | 3794 | ||
| 3151 | mutex_lock(&ftrace_lock); | 3795 | mutex_lock(&ftrace_lock); |
| 3152 | 3796 | ||
| 3797 | if (unlikely(ftrace_disabled)) | ||
| 3798 | goto out_unlock; | ||
| 3799 | |||
| 3153 | ret = __register_ftrace_function(ops); | 3800 | ret = __register_ftrace_function(ops); |
| 3154 | ftrace_startup(0); | 3801 | if (!ret) |
| 3802 | ftrace_startup(ops, 0); | ||
| 3155 | 3803 | ||
| 3804 | |||
| 3805 | out_unlock: | ||
| 3156 | mutex_unlock(&ftrace_lock); | 3806 | mutex_unlock(&ftrace_lock); |
| 3157 | return ret; | 3807 | return ret; |
| 3158 | } | 3808 | } |
| 3809 | EXPORT_SYMBOL_GPL(register_ftrace_function); | ||
| 3159 | 3810 | ||
| 3160 | /** | 3811 | /** |
| 3161 | * unregister_ftrace_function - unregister a function for profiling. | 3812 | * unregister_ftrace_function - unregister a function for profiling. |
| @@ -3169,25 +3820,27 @@ int unregister_ftrace_function(struct ftrace_ops *ops) | |||
| 3169 | 3820 | ||
| 3170 | mutex_lock(&ftrace_lock); | 3821 | mutex_lock(&ftrace_lock); |
| 3171 | ret = __unregister_ftrace_function(ops); | 3822 | ret = __unregister_ftrace_function(ops); |
| 3172 | ftrace_shutdown(0); | 3823 | if (!ret) |
| 3824 | ftrace_shutdown(ops, 0); | ||
| 3173 | mutex_unlock(&ftrace_lock); | 3825 | mutex_unlock(&ftrace_lock); |
| 3174 | 3826 | ||
| 3175 | return ret; | 3827 | return ret; |
| 3176 | } | 3828 | } |
| 3829 | EXPORT_SYMBOL_GPL(unregister_ftrace_function); | ||
| 3177 | 3830 | ||
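With the `EXPORT_SYMBOL_GPL` additions, a GPL module can now attach its own ops, and startup/shutdown only run when the (un)registration actually succeeded. A hypothetical minimal module built on the exported pair (`my_trace_func` and `my_ops` are illustrative names):

```c
#include <linux/ftrace.h>
#include <linux/module.h>

static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* runs for every traced function hit while registered */
}

static struct ftrace_ops my_ops = {
	.func = my_trace_func,
};

static int __init my_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
```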
| 3178 | int | 3831 | int |
| 3179 | ftrace_enable_sysctl(struct ctl_table *table, int write, | 3832 | ftrace_enable_sysctl(struct ctl_table *table, int write, |
| 3180 | void __user *buffer, size_t *lenp, | 3833 | void __user *buffer, size_t *lenp, |
| 3181 | loff_t *ppos) | 3834 | loff_t *ppos) |
| 3182 | { | 3835 | { |
| 3183 | int ret; | 3836 | int ret = -ENODEV; |
| 3184 | |||
| 3185 | if (unlikely(ftrace_disabled)) | ||
| 3186 | return -ENODEV; | ||
| 3187 | 3837 | ||
| 3188 | mutex_lock(&ftrace_lock); | 3838 | mutex_lock(&ftrace_lock); |
| 3189 | 3839 | ||
| 3190 | ret = proc_dointvec(table, write, buffer, lenp, ppos); | 3840 | if (unlikely(ftrace_disabled)) |
| 3841 | goto out; | ||
| 3842 | |||
| 3843 | ret = proc_dointvec(table, write, buffer, lenp, ppos); | ||
| 3191 | 3844 | ||
| 3192 | if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) | 3845 | if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) |
| 3193 | goto out; | 3846 | goto out; |
| @@ -3199,11 +3852,11 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
| 3199 | ftrace_startup_sysctl(); | 3852 | ftrace_startup_sysctl(); |
| 3200 | 3853 | ||
| 3201 | /* we are starting ftrace again */ | 3854 | /* we are starting ftrace again */ |
| 3202 | if (ftrace_list != &ftrace_list_end) { | 3855 | if (ftrace_ops_list != &ftrace_list_end) { |
| 3203 | if (ftrace_list->next == &ftrace_list_end) | 3856 | if (ftrace_ops_list->next == &ftrace_list_end) |
| 3204 | ftrace_trace_function = ftrace_list->func; | 3857 | ftrace_trace_function = ftrace_ops_list->func; |
| 3205 | else | 3858 | else |
| 3206 | ftrace_trace_function = ftrace_list_func; | 3859 | ftrace_trace_function = ftrace_ops_list_func; |
| 3207 | } | 3860 | } |
| 3208 | 3861 | ||
| 3209 | } else { | 3862 | } else { |
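The sysctl path now reads the renamed `ftrace_ops_list`. The selection it performs, isolated and continuing the sketch above (`update_ftrace_function` is an illustrative name, not this commit's): one registered ops gets called directly, more than one goes through the list walker.

```c
/* the pointer the mcount-generated call site lands in */
static void (*ftrace_trace_function)(unsigned long, unsigned long) = ftrace_stub;

static void update_ftrace_function(void)	/* illustrative name */
{
	if (ftrace_ops_list == &ftrace_list_end)
		ftrace_trace_function = ftrace_stub;		/* nothing registered */
	else if (ftrace_ops_list->next == &ftrace_list_end)
		ftrace_trace_function = ftrace_ops_list->func;	/* fast path: one ops */
	else
		ftrace_trace_function = ftrace_ops_list_func;	/* several: walk the list */
}
```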
| @@ -3392,7 +4045,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |||
| 3392 | ftrace_graph_return = retfunc; | 4045 | ftrace_graph_return = retfunc; |
| 3393 | ftrace_graph_entry = entryfunc; | 4046 | ftrace_graph_entry = entryfunc; |
| 3394 | 4047 | ||
| 3395 | ftrace_startup(FTRACE_START_FUNC_RET); | 4048 | ftrace_startup(&global_ops, FTRACE_START_FUNC_RET); |
| 3396 | 4049 | ||
| 3397 | out: | 4050 | out: |
| 3398 | mutex_unlock(&ftrace_lock); | 4051 | mutex_unlock(&ftrace_lock); |
| @@ -3409,7 +4062,7 @@ void unregister_ftrace_graph(void) | |||
| 3409 | ftrace_graph_active--; | 4062 | ftrace_graph_active--; |
| 3410 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; | 4063 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; |
| 3411 | ftrace_graph_entry = ftrace_graph_entry_stub; | 4064 | ftrace_graph_entry = ftrace_graph_entry_stub; |
| 3412 | ftrace_shutdown(FTRACE_STOP_FUNC_RET); | 4065 | ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET); |
| 3413 | unregister_pm_notifier(&ftrace_suspend_notifier); | 4066 | unregister_pm_notifier(&ftrace_suspend_notifier); |
| 3414 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); | 4067 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); |
| 3415 | 4068 | ||
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 1cb49be7c7fb..ee9c921d7f21 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -2014,9 +2014,10 @@ enum print_line_t print_trace_line(struct trace_iterator *iter) | |||
| 2014 | { | 2014 | { |
| 2015 | enum print_line_t ret; | 2015 | enum print_line_t ret; |
| 2016 | 2016 | ||
| 2017 | if (iter->lost_events) | 2017 | if (iter->lost_events && |
| 2018 | trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", | 2018 | !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", |
| 2019 | iter->cpu, iter->lost_events); | 2019 | iter->cpu, iter->lost_events)) |
| 2020 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2020 | 2021 | ||
| 2021 | if (iter->trace && iter->trace->print_line) { | 2022 | if (iter->trace && iter->trace->print_line) { |
| 2022 | ret = iter->trace->print_line(iter); | 2023 | ret = iter->trace->print_line(iter); |
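The trace.c fix matters because `trace_seq_printf()` returns 0 once the seq buffer is full; previously a full buffer silently dropped the LOST-events note, now the failure propagates as a partial line so the caller flushes and retries. A userspace model of that return-value contract (buffer size and function names are illustrative):

```c
#include <stdarg.h>
#include <stdio.h>

enum print_line_t { TRACE_TYPE_PARTIAL_LINE, TRACE_TYPE_HANDLED };

struct trace_seq { char buf[64]; size_t len; };	/* size illustrative */

/* returns 1 on success, 0 once the line no longer fits (kernel contract) */
static int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = vsnprintf(s->buf + s->len, sizeof(s->buf) - s->len, fmt, ap);
	va_end(ap);
	if (n < 0 || (size_t)n >= sizeof(s->buf) - s->len)
		return 0;
	s->len += n;
	return 1;
}

static enum print_line_t report_lost(struct trace_seq *s, int cpu, long lost)
{
	if (lost && !trace_seq_printf(s, "CPU:%d [LOST %ld EVENTS]\n", cpu, lost))
		return TRACE_TYPE_PARTIAL_LINE;	/* buffer full: retry after flush */
	return TRACE_TYPE_HANDLED;
}
```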
| @@ -3230,6 +3231,14 @@ waitagain: | |||
| 3230 | 3231 | ||
| 3231 | if (iter->seq.len >= cnt) | 3232 | if (iter->seq.len >= cnt) |
| 3232 | break; | 3233 | break; |
| 3234 | |||
| 3235 | /* | ||
| 3236 | * Setting the full flag means we reached the trace_seq buffer | ||
| 3237 | * size and we should leave by partial output condition above. | ||
| 3238 | * One of the trace_seq_* functions is not used properly. | ||
| 3239 | */ | ||
| 3240 | WARN_ONCE(iter->seq.full, "full flag set for trace type %d", | ||
| 3241 | iter->ent->type); | ||
| 3233 | } | 3242 | } |
| 3234 | trace_access_unlock(iter->cpu_file); | 3243 | trace_access_unlock(iter->cpu_file); |
| 3235 | trace_event_read_unlock(); | 3244 | trace_event_read_unlock(); |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 5e9dfc6286dd..6b69c4bd306f 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -419,6 +419,8 @@ extern void trace_find_cmdline(int pid, char comm[]); | |||
| 419 | extern unsigned long ftrace_update_tot_cnt; | 419 | extern unsigned long ftrace_update_tot_cnt; |
| 420 | #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func | 420 | #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func |
| 421 | extern int DYN_FTRACE_TEST_NAME(void); | 421 | extern int DYN_FTRACE_TEST_NAME(void); |
| 422 | #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 | ||
| 423 | extern int DYN_FTRACE_TEST_NAME2(void); | ||
| 422 | #endif | 424 | #endif |
| 423 | 425 | ||
| 424 | extern int ring_buffer_expanded; | 426 | extern int ring_buffer_expanded; |
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 16aee4d44e8f..8d0e1cc4e974 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
| @@ -149,11 +149,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip) | |||
| 149 | static struct ftrace_ops trace_ops __read_mostly = | 149 | static struct ftrace_ops trace_ops __read_mostly = |
| 150 | { | 150 | { |
| 151 | .func = function_trace_call, | 151 | .func = function_trace_call, |
| 152 | .flags = FTRACE_OPS_FL_GLOBAL, | ||
| 152 | }; | 153 | }; |
| 153 | 154 | ||
| 154 | static struct ftrace_ops trace_stack_ops __read_mostly = | 155 | static struct ftrace_ops trace_stack_ops __read_mostly = |
| 155 | { | 156 | { |
| 156 | .func = function_stack_trace_call, | 157 | .func = function_stack_trace_call, |
| 158 | .flags = FTRACE_OPS_FL_GLOBAL, | ||
| 157 | }; | 159 | }; |
| 158 | 160 | ||
| 159 | /* Our two options */ | 161 | /* Our two options */ |
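The one-line `.flags = FTRACE_OPS_FL_GLOBAL` additions here (and in trace_irqsoff.c, trace_sched_wakeup.c, and trace_stack.c below) opt the built-in tracers into the shared filter behind `set_ftrace_filter`, while unflagged ops carry private hashes. The distinction in miniature (flag value and names illustrative):

```c
#define FTRACE_OPS_FL_GLOBAL	0x1	/* value illustrative */

struct ftrace_ops {
	void (*func)(unsigned long ip, unsigned long parent_ip);
	unsigned long flags;
};

static void noop_func(unsigned long ip, unsigned long parent_ip) { }

/* filtered by the shared hash that set_ftrace_filter edits */
static struct ftrace_ops a_global_tracer_ops = {
	.func  = noop_func,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

/* filtered by its own hash, set via ftrace_set_filter(&ops, ...) */
static struct ftrace_ops a_private_ops = {
	.func = noop_func,
};
```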
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index a4969b47afc1..c77424be284d 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
| @@ -153,6 +153,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
| 153 | static struct ftrace_ops trace_ops __read_mostly = | 153 | static struct ftrace_ops trace_ops __read_mostly = |
| 154 | { | 154 | { |
| 155 | .func = irqsoff_tracer_call, | 155 | .func = irqsoff_tracer_call, |
| 156 | .flags = FTRACE_OPS_FL_GLOBAL, | ||
| 156 | }; | 157 | }; |
| 157 | #endif /* CONFIG_FUNCTION_TRACER */ | 158 | #endif /* CONFIG_FUNCTION_TRACER */ |
| 158 | 159 | ||
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 456be9063c2d..cf535ccedc86 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
| @@ -830,6 +830,9 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_event); | |||
| 830 | enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags, | 830 | enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags, |
| 831 | struct trace_event *event) | 831 | struct trace_event *event) |
| 832 | { | 832 | { |
| 833 | if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type)) | ||
| 834 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 835 | |||
| 833 | return TRACE_TYPE_HANDLED; | 836 | return TRACE_TYPE_HANDLED; |
| 834 | } | 837 | } |
| 835 | 838 | ||
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index 2547d8813cf0..dff763b7baf1 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c | |||
| @@ -32,7 +32,7 @@ static DEFINE_MUTEX(btrace_mutex); | |||
| 32 | 32 | ||
| 33 | struct trace_bprintk_fmt { | 33 | struct trace_bprintk_fmt { |
| 34 | struct list_head list; | 34 | struct list_head list; |
| 35 | char fmt[0]; | 35 | const char *fmt; |
| 36 | }; | 36 | }; |
| 37 | 37 | ||
| 38 | static inline struct trace_bprintk_fmt *lookup_format(const char *fmt) | 38 | static inline struct trace_bprintk_fmt *lookup_format(const char *fmt) |
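`fmt` changes from a flexible array member to a pointer so each format string lives in its own allocation, letting the iterator added below hand out a `const char **` uniformly for core-section and module entries. The two layouts side by side (field-only sketch; `list_head` is a stand-in for the kernel type):

```c
struct list_head { struct list_head *next, *prev; };	/* stand-in */

/* before: header and string share one allocation sized at kmalloc time */
struct trace_bprintk_fmt_old {
	struct list_head list;
	char fmt[];	/* written as fmt[0] in the older kernel style */
};

/* after: the string is a separate allocation the entry points at */
struct trace_bprintk_fmt_new {
	struct list_head list;
	const char *fmt;
};
```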
| @@ -49,6 +49,7 @@ static | |||
| 49 | void hold_module_trace_bprintk_format(const char **start, const char **end) | 49 | void hold_module_trace_bprintk_format(const char **start, const char **end) |
| 50 | { | 50 | { |
| 51 | const char **iter; | 51 | const char **iter; |
| 52 | char *fmt; | ||
| 52 | 53 | ||
| 53 | mutex_lock(&btrace_mutex); | 54 | mutex_lock(&btrace_mutex); |
| 54 | for (iter = start; iter < end; iter++) { | 55 | for (iter = start; iter < end; iter++) { |
| @@ -58,14 +59,18 @@ void hold_module_trace_bprintk_format(const char **start, const char **end) | |||
| 58 | continue; | 59 | continue; |
| 59 | } | 60 | } |
| 60 | 61 | ||
| 61 | tb_fmt = kmalloc(offsetof(struct trace_bprintk_fmt, fmt) | 62 | tb_fmt = kmalloc(sizeof(*tb_fmt), GFP_KERNEL); |
| 62 | + strlen(*iter) + 1, GFP_KERNEL); | 63 | if (tb_fmt) |
| 63 | if (tb_fmt) { | 64 | fmt = kmalloc(strlen(*iter) + 1, GFP_KERNEL); |
| 65 | if (tb_fmt && fmt) { | ||
| 64 | list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list); | 66 | list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list); |
| 65 | strcpy(tb_fmt->fmt, *iter); | 67 | strcpy(fmt, *iter); |
| 68 | tb_fmt->fmt = fmt; | ||
| 66 | *iter = tb_fmt->fmt; | 69 | *iter = tb_fmt->fmt; |
| 67 | } else | 70 | } else { |
| 71 | kfree(tb_fmt); | ||
| 68 | *iter = NULL; | 72 | *iter = NULL; |
| 73 | } | ||
| 69 | } | 74 | } |
| 70 | mutex_unlock(&btrace_mutex); | 75 | mutex_unlock(&btrace_mutex); |
| 71 | } | 76 | } |
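The reworked allocation path is easy to misread: `fmt` is only examined when `tb_fmt` is non-NULL (so the short-circuit never reads it uninitialized), and `kfree(NULL)` is a no-op, so either failure order unwinds cleanly. The same shape in plain userspace C:

```c
#include <stdlib.h>
#include <string.h>

struct fmt_rec { const char *fmt; };

static struct fmt_rec *hold_fmt(const char *src)
{
	struct fmt_rec *rec = malloc(sizeof(*rec));
	char *fmt = NULL;

	if (rec)
		fmt = malloc(strlen(src) + 1);
	if (rec && fmt) {
		strcpy(fmt, src);
		rec->fmt = fmt;
		return rec;
	}
	free(rec);	/* free(NULL) is a no-op, like kfree(NULL) */
	return NULL;
}
```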
| @@ -84,6 +89,76 @@ static int module_trace_bprintk_format_notify(struct notifier_block *self, | |||
| 84 | return 0; | 89 | return 0; |
| 85 | } | 90 | } |
| 86 | 91 | ||
| 92 | /* | ||
| 93 | * The debugfs/tracing/printk_formats file maps the addresses with | ||
| 94 | * the ASCII formats that are used in the bprintk events in the | ||
| 95 | * buffer. For userspace tools to be able to decode the events from | ||
| 96 | * the buffer, they need to be able to map the address with the format. | ||
| 97 | * | ||
| 98 | * The addresses of the bprintk formats are in their own section | ||
| 99 | * __trace_printk_fmt. But for modules we copy them into a linked list. | ||
| 100 | * The code to print the formats and their addresses passes around the | ||
| 101 | * address of the fmt string. If the fmt address passed into the seq | ||
| 102 | * functions is within the kernel core __trace_printk_fmt section, then | ||
| 103 | * it simply uses the next pointer in the list. | ||
| 104 | * | ||
| 105 | * When the fmt pointer is outside the kernel core __trace_printk_fmt | ||
| 106 | * section, then we need to read the linked list pointers. The trick is | ||
| 107 | * we pass the address of the string to the seq function just like | ||
| 108 | * we do for the kernel core formats. To get back the structure that | ||
| 109 | * holds the format, we simply use container_of() and then go to the | ||
| 110 | * next format in the list. | ||
| 111 | */ | ||
| 112 | static const char ** | ||
| 113 | find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos) | ||
| 114 | { | ||
| 115 | struct trace_bprintk_fmt *mod_fmt; | ||
| 116 | |||
| 117 | if (list_empty(&trace_bprintk_fmt_list)) | ||
| 118 | return NULL; | ||
| 119 | |||
| 120 | /* | ||
| 121 | * v will point to the address of the fmt record from t_next | ||
| 122 | * v will be NULL from t_start. | ||
| 123 | * If this is the first pointer or called from start | ||
| 124 | * then we need to walk the list. | ||
| 125 | */ | ||
| 126 | if (!v || start_index == *pos) { | ||
| 127 | struct trace_bprintk_fmt *p; | ||
| 128 | |||
| 129 | /* search the module list */ | ||
| 130 | list_for_each_entry(p, &trace_bprintk_fmt_list, list) { | ||
| 131 | if (start_index == *pos) | ||
| 132 | return &p->fmt; | ||
| 133 | start_index++; | ||
| 134 | } | ||
| 135 | /* pos > index */ | ||
| 136 | return NULL; | ||
| 137 | } | ||
| 138 | |||
| 139 | /* | ||
| 140 | * v points to the address of the fmt field in the mod list | ||
| 141 | * structure that holds the module print format. | ||
| 142 | */ | ||
| 143 | mod_fmt = container_of(v, typeof(*mod_fmt), fmt); | ||
| 144 | if (mod_fmt->list.next == &trace_bprintk_fmt_list) | ||
| 145 | return NULL; | ||
| 146 | |||
| 147 | mod_fmt = container_of(mod_fmt->list.next, typeof(*mod_fmt), list); | ||
| 148 | |||
| 149 | return &mod_fmt->fmt; | ||
| 150 | } | ||
| 151 | |||
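A compilable userspace reduction of the `container_of()` trick the comment describes: the caller only ever sees `&entry->fmt`, and the owning entry is recovered from that member address in order to step to the next one.

```c
#include <stddef.h>

struct fmt_entry {
	struct fmt_entry *next;
	const char *fmt;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* given the address of one entry's fmt member, return the next one's */
static const char **next_mod_fmt(const char **v)
{
	struct fmt_entry *e = container_of(v, struct fmt_entry, fmt);

	return e->next ? &e->next->fmt : NULL;
}
```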
| 152 | static void format_mod_start(void) | ||
| 153 | { | ||
| 154 | mutex_lock(&btrace_mutex); | ||
| 155 | } | ||
| 156 | |||
| 157 | static void format_mod_stop(void) | ||
| 158 | { | ||
| 159 | mutex_unlock(&btrace_mutex); | ||
| 160 | } | ||
| 161 | |||
| 87 | #else /* !CONFIG_MODULES */ | 162 | #else /* !CONFIG_MODULES */ |
| 88 | __init static int | 163 | __init static int |
| 89 | module_trace_bprintk_format_notify(struct notifier_block *self, | 164 | module_trace_bprintk_format_notify(struct notifier_block *self, |
| @@ -91,6 +166,13 @@ module_trace_bprintk_format_notify(struct notifier_block *self, | |||
| 91 | { | 166 | { |
| 92 | return 0; | 167 | return 0; |
| 93 | } | 168 | } |
| 169 | static inline const char ** | ||
| 170 | find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos) | ||
| 171 | { | ||
| 172 | return NULL; | ||
| 173 | } | ||
| 174 | static inline void format_mod_start(void) { } | ||
| 175 | static inline void format_mod_stop(void) { } | ||
| 94 | #endif /* CONFIG_MODULES */ | 176 | #endif /* CONFIG_MODULES */ |
| 95 | 177 | ||
| 96 | 178 | ||
| @@ -153,20 +235,33 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap) | |||
| 153 | } | 235 | } |
| 154 | EXPORT_SYMBOL_GPL(__ftrace_vprintk); | 236 | EXPORT_SYMBOL_GPL(__ftrace_vprintk); |
| 155 | 237 | ||
| 238 | static const char **find_next(void *v, loff_t *pos) | ||
| 239 | { | ||
| 240 | const char **fmt = v; | ||
| 241 | int start_index; | ||
| 242 | |||
| 243 | if (!fmt) | ||
| 244 | fmt = __start___trace_bprintk_fmt + *pos; | ||
| 245 | |||
| 246 | start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt; | ||
| 247 | |||
| 248 | if (*pos < start_index) | ||
| 249 | return fmt; | ||
| 250 | |||
| 251 | return find_next_mod_format(start_index, v, fmt, pos); | ||
| 252 | } | ||
| 253 | |||
| 156 | static void * | 254 | static void * |
| 157 | t_start(struct seq_file *m, loff_t *pos) | 255 | t_start(struct seq_file *m, loff_t *pos) |
| 158 | { | 256 | { |
| 159 | const char **fmt = __start___trace_bprintk_fmt + *pos; | 257 | format_mod_start(); |
| 160 | 258 | return find_next(NULL, pos); | |
| 161 | if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt) | ||
| 162 | return NULL; | ||
| 163 | return fmt; | ||
| 164 | } | 259 | } |
| 165 | 260 | ||
| 166 | static void *t_next(struct seq_file *m, void * v, loff_t *pos) | 261 | static void *t_next(struct seq_file *m, void * v, loff_t *pos) |
| 167 | { | 262 | { |
| 168 | (*pos)++; | 263 | (*pos)++; |
| 169 | return t_start(m, pos); | 264 | return find_next(v, pos); |
| 170 | } | 265 | } |
| 171 | 266 | ||
| 172 | static int t_show(struct seq_file *m, void *v) | 267 | static int t_show(struct seq_file *m, void *v) |
| @@ -205,6 +300,7 @@ static int t_show(struct seq_file *m, void *v) | |||
| 205 | 300 | ||
| 206 | static void t_stop(struct seq_file *m, void *p) | 301 | static void t_stop(struct seq_file *m, void *p) |
| 207 | { | 302 | { |
| 303 | format_mod_stop(); | ||
| 208 | } | 304 | } |
| 209 | 305 | ||
| 210 | static const struct seq_operations show_format_seq_ops = { | 306 | static const struct seq_operations show_format_seq_ops = { |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 7319559ed59f..f029dd4fd2ca 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
| @@ -129,6 +129,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
| 129 | static struct ftrace_ops trace_ops __read_mostly = | 129 | static struct ftrace_ops trace_ops __read_mostly = |
| 130 | { | 130 | { |
| 131 | .func = wakeup_tracer_call, | 131 | .func = wakeup_tracer_call, |
| 132 | .flags = FTRACE_OPS_FL_GLOBAL, | ||
| 132 | }; | 133 | }; |
| 133 | #endif /* CONFIG_FUNCTION_TRACER */ | 134 | #endif /* CONFIG_FUNCTION_TRACER */ |
| 134 | 135 | ||
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 659732eba07c..288541f977fb 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
| @@ -101,6 +101,206 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret) | |||
| 101 | 101 | ||
| 102 | #ifdef CONFIG_DYNAMIC_FTRACE | 102 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 103 | 103 | ||
| 104 | static int trace_selftest_test_probe1_cnt; | ||
| 105 | static void trace_selftest_test_probe1_func(unsigned long ip, | ||
| 106 | unsigned long pip) | ||
| 107 | { | ||
| 108 | trace_selftest_test_probe1_cnt++; | ||
| 109 | } | ||
| 110 | |||
| 111 | static int trace_selftest_test_probe2_cnt; | ||
| 112 | static void trace_selftest_test_probe2_func(unsigned long ip, | ||
| 113 | unsigned long pip) | ||
| 114 | { | ||
| 115 | trace_selftest_test_probe2_cnt++; | ||
| 116 | } | ||
| 117 | |||
| 118 | static int trace_selftest_test_probe3_cnt; | ||
| 119 | static void trace_selftest_test_probe3_func(unsigned long ip, | ||
| 120 | unsigned long pip) | ||
| 121 | { | ||
| 122 | trace_selftest_test_probe3_cnt++; | ||
| 123 | } | ||
| 124 | |||
| 125 | static int trace_selftest_test_global_cnt; | ||
| 126 | static void trace_selftest_test_global_func(unsigned long ip, | ||
| 127 | unsigned long pip) | ||
| 128 | { | ||
| 129 | trace_selftest_test_global_cnt++; | ||
| 130 | } | ||
| 131 | |||
| 132 | static int trace_selftest_test_dyn_cnt; | ||
| 133 | static void trace_selftest_test_dyn_func(unsigned long ip, | ||
| 134 | unsigned long pip) | ||
| 135 | { | ||
| 136 | trace_selftest_test_dyn_cnt++; | ||
| 137 | } | ||
| 138 | |||
| 139 | static struct ftrace_ops test_probe1 = { | ||
| 140 | .func = trace_selftest_test_probe1_func, | ||
| 141 | }; | ||
| 142 | |||
| 143 | static struct ftrace_ops test_probe2 = { | ||
| 144 | .func = trace_selftest_test_probe2_func, | ||
| 145 | }; | ||
| 146 | |||
| 147 | static struct ftrace_ops test_probe3 = { | ||
| 148 | .func = trace_selftest_test_probe3_func, | ||
| 149 | }; | ||
| 150 | |||
| 151 | static struct ftrace_ops test_global = { | ||
| 152 | .func = trace_selftest_test_global_func, | ||
| 153 | .flags = FTRACE_OPS_FL_GLOBAL, | ||
| 154 | }; | ||
| 155 | |||
| 156 | static void print_counts(void) | ||
| 157 | { | ||
| 158 | printk("(%d %d %d %d %d) ", | ||
| 159 | trace_selftest_test_probe1_cnt, | ||
| 160 | trace_selftest_test_probe2_cnt, | ||
| 161 | trace_selftest_test_probe3_cnt, | ||
| 162 | trace_selftest_test_global_cnt, | ||
| 163 | trace_selftest_test_dyn_cnt); | ||
| 164 | } | ||
| 165 | |||
| 166 | static void reset_counts(void) | ||
| 167 | { | ||
| 168 | trace_selftest_test_probe1_cnt = 0; | ||
| 169 | trace_selftest_test_probe2_cnt = 0; | ||
| 170 | trace_selftest_test_probe3_cnt = 0; | ||
| 171 | trace_selftest_test_global_cnt = 0; | ||
| 172 | trace_selftest_test_dyn_cnt = 0; | ||
| 173 | } | ||
| 174 | |||
| 175 | static int trace_selftest_ops(int cnt) | ||
| 176 | { | ||
| 177 | int save_ftrace_enabled = ftrace_enabled; | ||
| 178 | struct ftrace_ops *dyn_ops; | ||
| 179 | char *func1_name; | ||
| 180 | char *func2_name; | ||
| 181 | int len1; | ||
| 182 | int len2; | ||
| 183 | int ret = -1; | ||
| 184 | |||
| 185 | printk(KERN_CONT "PASSED\n"); | ||
| 186 | pr_info("Testing dynamic ftrace ops #%d: ", cnt); | ||
| 187 | |||
| 188 | ftrace_enabled = 1; | ||
| 189 | reset_counts(); | ||
| 190 | |||
| 191 | /* Handle PPC64 '.' name */ | ||
| 192 | func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME); | ||
| 193 | func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2); | ||
| 194 | len1 = strlen(func1_name); | ||
| 195 | len2 = strlen(func2_name); | ||
| 196 | |||
| 197 | /* | ||
| 198 | * Probe 1 will trace function 1. | ||
| 199 | * Probe 2 will trace function 2. | ||
| 200 | * Probe 3 will trace functions 1 and 2. | ||
| 201 | */ | ||
| 202 | ftrace_set_filter(&test_probe1, func1_name, len1, 1); | ||
| 203 | ftrace_set_filter(&test_probe2, func2_name, len2, 1); | ||
| 204 | ftrace_set_filter(&test_probe3, func1_name, len1, 1); | ||
| 205 | ftrace_set_filter(&test_probe3, func2_name, len2, 0); | ||
| 206 | |||
| 207 | register_ftrace_function(&test_probe1); | ||
| 208 | register_ftrace_function(&test_probe2); | ||
| 209 | register_ftrace_function(&test_probe3); | ||
| 210 | register_ftrace_function(&test_global); | ||
| 211 | |||
| 212 | DYN_FTRACE_TEST_NAME(); | ||
| 213 | |||
| 214 | print_counts(); | ||
| 215 | |||
| 216 | if (trace_selftest_test_probe1_cnt != 1) | ||
| 217 | goto out; | ||
| 218 | if (trace_selftest_test_probe2_cnt != 0) | ||
| 219 | goto out; | ||
| 220 | if (trace_selftest_test_probe3_cnt != 1) | ||
| 221 | goto out; | ||
| 222 | if (trace_selftest_test_global_cnt == 0) | ||
| 223 | goto out; | ||
| 224 | |||
| 225 | DYN_FTRACE_TEST_NAME2(); | ||
| 226 | |||
| 227 | print_counts(); | ||
| 228 | |||
| 229 | if (trace_selftest_test_probe1_cnt != 1) | ||
| 230 | goto out; | ||
| 231 | if (trace_selftest_test_probe2_cnt != 1) | ||
| 232 | goto out; | ||
| 233 | if (trace_selftest_test_probe3_cnt != 2) | ||
| 234 | goto out; | ||
| 235 | |||
| 236 | /* Add a dynamic probe */ | ||
| 237 | dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL); | ||
| 238 | if (!dyn_ops) { | ||
| 239 | printk("MEMORY ERROR "); | ||
| 240 | goto out; | ||
| 241 | } | ||
| 242 | |||
| 243 | dyn_ops->func = trace_selftest_test_dyn_func; | ||
| 244 | |||
| 245 | register_ftrace_function(dyn_ops); | ||
| 246 | |||
| 247 | trace_selftest_test_global_cnt = 0; | ||
| 248 | |||
| 249 | DYN_FTRACE_TEST_NAME(); | ||
| 250 | |||
| 251 | print_counts(); | ||
| 252 | |||
| 253 | if (trace_selftest_test_probe1_cnt != 2) | ||
| 254 | goto out_free; | ||
| 255 | if (trace_selftest_test_probe2_cnt != 1) | ||
| 256 | goto out_free; | ||
| 257 | if (trace_selftest_test_probe3_cnt != 3) | ||
| 258 | goto out_free; | ||
| 259 | if (trace_selftest_test_global_cnt == 0) | ||
| 260 | goto out_free; | ||
| 261 | if (trace_selftest_test_dyn_cnt == 0) | ||
| 262 | goto out_free; | ||
| 263 | |||
| 264 | DYN_FTRACE_TEST_NAME2(); | ||
| 265 | |||
| 266 | print_counts(); | ||
| 267 | |||
| 268 | if (trace_selftest_test_probe1_cnt != 2) | ||
| 269 | goto out_free; | ||
| 270 | if (trace_selftest_test_probe2_cnt != 2) | ||
| 271 | goto out_free; | ||
| 272 | if (trace_selftest_test_probe3_cnt != 4) | ||
| 273 | goto out_free; | ||
| 274 | |||
| 275 | ret = 0; | ||
| 276 | out_free: | ||
| 277 | unregister_ftrace_function(dyn_ops); | ||
| 278 | kfree(dyn_ops); | ||
| 279 | |||
| 280 | out: | ||
| 281 | /* Purposely unregister in the same order */ | ||
| 282 | unregister_ftrace_function(&test_probe1); | ||
| 283 | unregister_ftrace_function(&test_probe2); | ||
| 284 | unregister_ftrace_function(&test_probe3); | ||
| 285 | unregister_ftrace_function(&test_global); | ||
| 286 | |||
| 287 | /* Make sure everything is off */ | ||
| 288 | reset_counts(); | ||
| 289 | DYN_FTRACE_TEST_NAME(); | ||
| 290 | DYN_FTRACE_TEST_NAME(); | ||
| 291 | |||
| 292 | if (trace_selftest_test_probe1_cnt || | ||
| 293 | trace_selftest_test_probe2_cnt || | ||
| 294 | trace_selftest_test_probe3_cnt || | ||
| 295 | trace_selftest_test_global_cnt || | ||
| 296 | trace_selftest_test_dyn_cnt) | ||
| 297 | ret = -1; | ||
| 298 | |||
| 299 | ftrace_enabled = save_ftrace_enabled; | ||
| 300 | |||
| 301 | return ret; | ||
| 302 | } | ||
| 303 | |||
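The counter matrix the checks above encode: probe1 filters on function 1 only, probe2 on function 2 only, probe3 on both, while the global and dynamic ops match everything. A tiny userspace harness restating those expectations (names illustrative, counters modeled as plain ints):

```c
#include <assert.h>

static int p1, p2, p3;	/* model the probe counters above */

static void call(int is_func1)
{
	if (is_func1)
		p1++;	/* probe1 filters on function 1 only */
	else
		p2++;	/* probe2 filters on function 2 only */
	p3++;		/* probe3 filters on both functions */
}

int main(void)
{
	call(1);	/* DYN_FTRACE_TEST_NAME() */
	assert(p1 == 1 && p2 == 0 && p3 == 1);
	call(0);	/* DYN_FTRACE_TEST_NAME2() */
	assert(p1 == 1 && p2 == 1 && p3 == 2);
	return 0;
}
```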
| 104 | /* Test dynamic code modification and ftrace filters */ | 304 | /* Test dynamic code modification and ftrace filters */ |
| 105 | int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | 305 | int trace_selftest_startup_dynamic_tracing(struct tracer *trace, |
| 106 | struct trace_array *tr, | 306 | struct trace_array *tr, |
| @@ -131,7 +331,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
| 131 | func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); | 331 | func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); |
| 132 | 332 | ||
| 133 | /* filter only on our function */ | 333 | /* filter only on our function */ |
| 134 | ftrace_set_filter(func_name, strlen(func_name), 1); | 334 | ftrace_set_global_filter(func_name, strlen(func_name), 1); |
| 135 | 335 | ||
| 136 | /* enable tracing */ | 336 | /* enable tracing */ |
| 137 | ret = tracer_init(trace, tr); | 337 | ret = tracer_init(trace, tr); |
| @@ -166,22 +366,30 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
| 166 | 366 | ||
| 167 | /* check the trace buffer */ | 367 | /* check the trace buffer */ |
| 168 | ret = trace_test_buffer(tr, &count); | 368 | ret = trace_test_buffer(tr, &count); |
| 169 | trace->reset(tr); | ||
| 170 | tracing_start(); | 369 | tracing_start(); |
| 171 | 370 | ||
| 172 | /* we should only have one item */ | 371 | /* we should only have one item */ |
| 173 | if (!ret && count != 1) { | 372 | if (!ret && count != 1) { |
| 373 | trace->reset(tr); | ||
| 174 | printk(KERN_CONT ".. filter failed count=%ld ..", count); | 374 | printk(KERN_CONT ".. filter failed count=%ld ..", count); |
| 175 | ret = -1; | 375 | ret = -1; |
| 176 | goto out; | 376 | goto out; |
| 177 | } | 377 | } |
| 178 | 378 | ||
| 379 | /* Test the ops with global tracing running */ | ||
| 380 | ret = trace_selftest_ops(1); | ||
| 381 | trace->reset(tr); | ||
| 382 | |||
| 179 | out: | 383 | out: |
| 180 | ftrace_enabled = save_ftrace_enabled; | 384 | ftrace_enabled = save_ftrace_enabled; |
| 181 | tracer_enabled = save_tracer_enabled; | 385 | tracer_enabled = save_tracer_enabled; |
| 182 | 386 | ||
| 183 | /* Enable tracing on all functions again */ | 387 | /* Enable tracing on all functions again */ |
| 184 | ftrace_set_filter(NULL, 0, 1); | 388 | ftrace_set_global_filter(NULL, 0, 1); |
| 389 | |||
| 390 | /* Test the ops with global tracing off */ | ||
| 391 | if (!ret) | ||
| 392 | ret = trace_selftest_ops(2); | ||
| 185 | 393 | ||
| 186 | return ret; | 394 | return ret; |
| 187 | } | 395 | } |
diff --git a/kernel/trace/trace_selftest_dynamic.c b/kernel/trace/trace_selftest_dynamic.c index 54dd77cce5bf..b4c475a0a48b 100644 --- a/kernel/trace/trace_selftest_dynamic.c +++ b/kernel/trace/trace_selftest_dynamic.c | |||
| @@ -5,3 +5,9 @@ int DYN_FTRACE_TEST_NAME(void) | |||
| 5 | /* used to call mcount */ | 5 | /* used to call mcount */ |
| 6 | return 0; | 6 | return 0; |
| 7 | } | 7 | } |
| 8 | |||
| 9 | int DYN_FTRACE_TEST_NAME2(void) | ||
| 10 | { | ||
| 11 | /* used to call mcount */ | ||
| 12 | return 0; | ||
| 13 | } | ||
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 4c5dead0c239..b0b53b8e4c25 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
| @@ -133,6 +133,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) | |||
| 133 | static struct ftrace_ops trace_ops __read_mostly = | 133 | static struct ftrace_ops trace_ops __read_mostly = |
| 134 | { | 134 | { |
| 135 | .func = stack_trace_call, | 135 | .func = stack_trace_call, |
| 136 | .flags = FTRACE_OPS_FL_GLOBAL, | ||
| 136 | }; | 137 | }; |
| 137 | 138 | ||
| 138 | static ssize_t | 139 | static ssize_t |
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 68187af4889e..b219f1449c54 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c | |||
| @@ -251,9 +251,9 @@ static void set_tracepoint(struct tracepoint_entry **entry, | |||
| 251 | { | 251 | { |
| 252 | WARN_ON(strcmp((*entry)->name, elem->name) != 0); | 252 | WARN_ON(strcmp((*entry)->name, elem->name) != 0); |
| 253 | 253 | ||
| 254 | if (elem->regfunc && !elem->state && active) | 254 | if (elem->regfunc && !jump_label_enabled(&elem->key) && active) |
| 255 | elem->regfunc(); | 255 | elem->regfunc(); |
| 256 | else if (elem->unregfunc && elem->state && !active) | 256 | else if (elem->unregfunc && jump_label_enabled(&elem->key) && !active) |
| 257 | elem->unregfunc(); | 257 | elem->unregfunc(); |
| 258 | 258 | ||
| 259 | /* | 259 | /* |
| @@ -264,13 +264,10 @@ static void set_tracepoint(struct tracepoint_entry **entry, | |||
| 264 | * is used. | 264 | * is used. |
| 265 | */ | 265 | */ |
| 266 | rcu_assign_pointer(elem->funcs, (*entry)->funcs); | 266 | rcu_assign_pointer(elem->funcs, (*entry)->funcs); |
| 267 | if (!elem->state && active) { | 267 | if (active && !jump_label_enabled(&elem->key)) |
| 268 | jump_label_enable(&elem->state); | 268 | jump_label_inc(&elem->key); |
| 269 | elem->state = active; | 269 | else if (!active && jump_label_enabled(&elem->key)) |
| 270 | } else if (elem->state && !active) { | 270 | jump_label_dec(&elem->key); |
| 271 | jump_label_disable(&elem->state); | ||
| 272 | elem->state = active; | ||
| 273 | } | ||
| 274 | } | 271 | } |
| 275 | 272 | ||
| 276 | /* | 273 | /* |
| @@ -281,13 +278,11 @@ static void set_tracepoint(struct tracepoint_entry **entry, | |||
| 281 | */ | 278 | */ |
| 282 | static void disable_tracepoint(struct tracepoint *elem) | 279 | static void disable_tracepoint(struct tracepoint *elem) |
| 283 | { | 280 | { |
| 284 | if (elem->unregfunc && elem->state) | 281 | if (elem->unregfunc && jump_label_enabled(&elem->key)) |
| 285 | elem->unregfunc(); | 282 | elem->unregfunc(); |
| 286 | 283 | ||
| 287 | if (elem->state) { | 284 | if (jump_label_enabled(&elem->key)) |
| 288 | jump_label_disable(&elem->state); | 285 | jump_label_dec(&elem->key); |
| 289 | elem->state = 0; | ||
| 290 | } | ||
| 291 | rcu_assign_pointer(elem->funcs, NULL); | 286 | rcu_assign_pointer(elem->funcs, NULL); |
| 292 | } | 287 | } |
| 293 | 288 | ||
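The tracepoint.c conversion replaces the open-coded `elem->state` toggle with a reference-counted jump label key. A plain-int userspace model of the new semantics (`jump_label_inc/dec/enabled` are the real APIs from this series; the struct here is a stand-in for the atomic key):

```c
struct jump_label_key { int refcount; };	/* stand-in for the atomic key */

static int jump_label_enabled(struct jump_label_key *key)
{
	return key->refcount > 0;
}

static void jump_label_inc(struct jump_label_key *key) { key->refcount++; }
static void jump_label_dec(struct jump_label_key *key) { key->refcount--; }

/* the new set_tracepoint() logic, minus the RCU and regfunc handling */
static void set_state(struct jump_label_key *key, int active)
{
	if (active && !jump_label_enabled(key))
		jump_label_inc(key);
	else if (!active && jump_label_enabled(key))
		jump_label_dec(key);
}
```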
