author    Steven Rostedt (Red Hat) <rostedt@goodmis.org>    2013-03-12 10:09:42 -0400
committer Steven Rostedt <rostedt@goodmis.org>              2013-03-15 00:36:00 -0400
commit    e1df4cb682ab2c3c2981c8efa4aec044e61f4e06
tree      2ca14d5b02052f2fec06a567fdb048aa7943a4f4
parent    8380d24860e9d1659ab22896b86d7fe591c424fa
ftrace: Fix function probe to only enable needed functions
Currently the function probe enables all functions and runs a "hash"
lookup against every function call to see if it should invoke a probe.
This is extremely wasteful.

Note, a probe is something like:

  echo schedule:traceoff > /debug/tracing/set_ftrace_filter

When schedule is called, the probe will disable tracing. But currently,
a callback is registered for *all* functions, and on every call it
checks whether the called function is one that has a probe attached.
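To make the cost concrete, here is a minimal userspace sketch of the
pre-patch dispatch (names and structure are illustrative, not the actual
ftrace code): a single callback fires on every function entry and performs
a lookup that misses for almost every call.

#include <stdio.h>
#include <string.h>

/* Stand-in for ftrace_func_hash: the set of functions with a probe. */
static const char *probed[] = { "schedule" };

/* Pre-patch scheme: invoked on EVERY traced function entry. */
static void probe_dispatch(const char *func)
{
	for (size_t i = 0; i < sizeof(probed) / sizeof(probed[0]); i++) {
		if (strcmp(func, probed[i]) == 0) {
			printf("%s: probe fired (e.g. tracing is turned off)\n", func);
			return;
		}
	}
	/* Miss: for this call the lookup was pure overhead. */
}

int main(void)
{
	/* Imagine millions of these per second; only one has a probe. */
	const char *calls[] = { "vfs_read", "do_page_fault", "schedule" };

	for (size_t i = 0; i < sizeof(calls) / sizeof(calls[0]); i++)
		probe_dispatch(calls[i]);
	return 0;
}

After this patch, the probe's ftrace_ops carries a filter hash of just the
matched functions, so the callback is only entered for functions that can
actually have a probe.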
The probe mechanism was created before ftrace was rewritten to allow
more than one "op" to be registered by the function tracer. At that time,
a probe could not limit which functions it hooked without also limiting
normal function tracing. Now that we can, it's about time to update the
probe code.
Todo: have separate ops for different entries. That is, assign
an ftrace_ops per probe, instead of one op for all probes. But
as there are not many probes assigned, this may not be that urgent.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r--   kernel/trace/ftrace.c   48
1 file changed, 46 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index e6effd0c40a9..dab031fec85b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2988,18 +2988,20 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
 	kfree(entry);
 }
 
-
 int
 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			       void *data)
 {
 	struct ftrace_func_probe *entry;
+	struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+	struct ftrace_hash *hash;
 	struct ftrace_page *pg;
 	struct dyn_ftrace *rec;
 	int type, len, not;
 	unsigned long key;
 	int count = 0;
 	char *search;
+	int ret;
 
 	type = filter_parse_regex(glob, strlen(glob), &search, &not);
 	len = strlen(search);
@@ -3010,8 +3012,16 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
 	mutex_lock(&ftrace_lock);
 
-	if (unlikely(ftrace_disabled))
+	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+	if (!hash) {
+		count = -ENOMEM;
+		goto out_unlock;
+	}
+
+	if (unlikely(ftrace_disabled)) {
+		count = -ENODEV;
 		goto out_unlock;
+	}
 
 	do_for_each_ftrace_rec(pg, rec) {
 
@@ -3043,6 +3053,13 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 			}
 		}
 
+		ret = enter_record(hash, rec, 0);
+		if (ret < 0) {
+			kfree(entry);
+			count = ret;
+			goto out_unlock;
+		}
+
 		entry->ops = ops;
 		entry->ip = rec->ip;
 
@@ -3050,10 +3067,16 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
 
 	} while_for_each_ftrace_rec();
+
+	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+	if (ret < 0)
+		count = ret;
+
 	__enable_ftrace_function_probe();
 
  out_unlock:
 	mutex_unlock(&ftrace_lock);
+	free_ftrace_hash(hash);
 
 	return count;
 }
@@ -3067,7 +3090,10 @@ static void
 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				   void *data, int flags)
 {
+	struct ftrace_func_entry *rec_entry;
 	struct ftrace_func_probe *entry;
+	struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+	struct ftrace_hash *hash;
 	struct hlist_node *n, *tmp;
 	char str[KSYM_SYMBOL_LEN];
 	int type = MATCH_FULL;
@@ -3088,6 +3114,12 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	}
 
 	mutex_lock(&ftrace_lock);
+
+	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+	if (!hash)
+		/* Hmm, should report this somehow */
+		goto out_unlock;
+
 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
 		struct hlist_head *hhd = &ftrace_func_hash[i];
 
@@ -3108,12 +3140,24 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 				continue;
 			}
 
+			rec_entry = ftrace_lookup_ip(hash, entry->ip);
+			/* It is possible more than one entry had this ip */
+			if (rec_entry)
+				free_hash_entry(hash, rec_entry);
+
 			hlist_del_rcu(&entry->node);
 			call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
 		}
 	}
 	__disable_ftrace_function_probe();
+	/*
+	 * Remove after the disable is called. Otherwise, if the last
+	 * probe is removed, a null hash means *all enabled*.
+	 */
+	ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+ out_unlock:
 	mutex_unlock(&ftrace_lock);
+	free_ftrace_hash(hash);
 }
 
 void
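Both hunks above follow the same copy-update-commit discipline on
trace_probe_ops.filter_hash: copy the live hash under ftrace_lock, edit
the copy (enter_record() on register, free_hash_entry() on unregister),
publish it with ftrace_hash_move(), and free the leftover hash after
unlocking. A rough userspace sketch of that discipline, with toy types
standing in for the real ftrace hash API (not the kernel implementation):

#include <stdlib.h>
#include <string.h>

/* Toy filter "hash": a flat array of instruction pointers. */
struct filter {
	unsigned long *ips;
	size_t count;
};

/* Like alloc_and_copy_ftrace_hash(): make a private copy to edit. */
static struct filter *filter_copy(const struct filter *src)
{
	struct filter *dst = calloc(1, sizeof(*dst));

	if (!dst)
		return NULL;
	if (src->count) {
		dst->ips = malloc(src->count * sizeof(*dst->ips));
		if (!dst->ips) {
			free(dst);
			return NULL;
		}
		memcpy(dst->ips, src->ips, src->count * sizeof(*src->ips));
		dst->count = src->count;
	}
	return dst;
}

/* Like enter_record(): add one matched function to the copy. */
static int filter_add(struct filter *f, unsigned long ip)
{
	unsigned long *ips = realloc(f->ips, (f->count + 1) * sizeof(*ips));

	if (!ips)
		return -1;
	ips[f->count++] = ip;
	f->ips = ips;
	return 0;
}

/* Like ftrace_hash_move(): swap the edited copy in as the live filter.
 * The displaced filter is returned for the caller to free outside the
 * lock; the patch similarly calls free_ftrace_hash() after
 * mutex_unlock(). */
static struct filter *filter_commit(struct filter **live, struct filter *copy)
{
	struct filter *old = *live;

	*live = copy;
	return old;
}

static void filter_free(struct filter *f)
{
	if (f) {
		free(f->ips);
		free(f);
	}
}

int main(void)
{
	struct filter empty = { 0 };
	struct filter *live = filter_copy(&empty);
	struct filter *copy;

	if (!live)
		return 1;
	copy = filter_copy(live);	/* copy under the "lock" */
	if (!copy || filter_add(copy, 0xffffffff81234567UL) < 0) {
		filter_free(copy);
		filter_free(live);
		return 1;
	}
	filter_free(filter_commit(&live, copy));	/* publish, free old */
	filter_free(live);
	return 0;
}

Note the ordering in the unregister path: the move is committed only after
__disable_ftrace_function_probe(), because, as the comment in the hunk
says, an empty filter hash means every function is enabled.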