Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--    kernel/trace/ftrace.c    1457
1 file changed, 1105 insertions(+), 352 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index fa7ece649fe1..908038f57440 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -39,20 +39,26 @@
 #include "trace_stat.h"
 
 #define FTRACE_WARN_ON(cond)			\
-	do {					\
-		if (WARN_ON(cond))		\
+	({					\
+		int ___r = cond;		\
+		if (WARN_ON(___r))		\
 			ftrace_kill();		\
-	} while (0)
+		___r;				\
+	})
 
 #define FTRACE_WARN_ON_ONCE(cond)		\
-	do {					\
-		if (WARN_ON_ONCE(cond))		\
+	({					\
+		int ___r = cond;		\
+		if (WARN_ON_ONCE(___r))		\
 			ftrace_kill();		\
-	} while (0)
+		___r;				\
+	})
 
 /* hash bits for specific function selection */
 #define FTRACE_HASH_BITS 7
 #define FTRACE_FUNC_HASHSIZE	(1 << FTRACE_HASH_BITS)
+#define FTRACE_HASH_DEFAULT_BITS 10
+#define FTRACE_HASH_MAX_BITS 12
 
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
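[annotation — illustrative example, not part of the patch]
The macro rewrite above swaps do { ... } while (0) for a GNU C statement
expression, which evaluates to its last expression. That lets FTRACE_WARN_ON()
both kill ftrace on a warning and hand the tested condition back to the caller,
so the macro can now sit inside an if (). A minimal userspace sketch of the same
trick (the names here are made up):

    #include <stdio.h>

    /* A statement expression yields the value of its last expression,
     * so the macro can warn AND be used as a boolean. */
    #define WARN_RET(cond)                                    \
        ({                                                    \
            int ___r = !!(cond);                              \
            if (___r)                                         \
                fprintf(stderr, "warning: %s\n", #cond);      \
            ___r;                                             \
        })

    int main(void)
    {
        int x = 5;

        if (WARN_RET(x > 3))    /* impossible with a do/while(0) macro */
            return 1;
        return 0;
    }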
@@ -81,28 +87,40 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
 	.func		= ftrace_stub,
 };
 
-static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
+static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
+static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
+static struct ftrace_ops global_ops;
+
+static void
+ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
 
 /*
- * Traverse the ftrace_list, invoking all entries. The reason that we
+ * Traverse the ftrace_global_list, invoking all entries. The reason that we
  * can use rcu_dereference_raw() is that elements removed from this list
  * are simply leaked, so there is no need to interact with a grace-period
  * mechanism. The rcu_dereference_raw() calls are needed to handle
- * concurrent insertions into the ftrace_list.
+ * concurrent insertions into the ftrace_global_list.
  *
  * Silly Alpha and silly pointer-speculation compiler optimizations!
  */
-static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
+static void ftrace_global_list_func(unsigned long ip,
+				    unsigned long parent_ip)
 {
-	struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/
+	struct ftrace_ops *op;
 
+	if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
+		return;
+
+	trace_recursion_set(TRACE_GLOBAL_BIT);
+	op = rcu_dereference_raw(ftrace_global_list); /*see above*/
 	while (op != &ftrace_list_end) {
 		op->func(ip, parent_ip);
 		op = rcu_dereference_raw(op->next); /*see above*/
 	};
+	trace_recursion_clear(TRACE_GLOBAL_BIT);
 }
 
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
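[annotation — illustrative example, not part of the patch]
ftrace_global_list_func() now brackets the list walk with
trace_recursion_set/clear(TRACE_GLOBAL_BIT), so a callback that itself executes
traced code cannot re-enter the walk and recurse forever. The kernel keeps these
bits per task; the sketch below shows only the shape of the guard, using a
thread-local flag instead:

    #include <stdio.h>

    static __thread int in_tracer;  /* stand-in for the per-task recursion bit */

    static void tracer(const char *func)
    {
        if (in_tracer)          /* already inside the callback: bail out */
            return;
        in_tracer = 1;

        /* if this printf were itself traced, the guard above would
         * stop the recursion on the nested call */
        printf("traced: %s\n", func);

        in_tracer = 0;
    }

    int main(void)
    {
        tracer("schedule");
        return 0;
    }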
@@ -147,46 +165,69 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
 }
 #endif
 
-static int __register_ftrace_function(struct ftrace_ops *ops)
+static void update_global_ops(void)
 {
-	ops->next = ftrace_list;
+	ftrace_func_t func;
+
 	/*
-	 * We are entering ops into the ftrace_list but another
-	 * CPU might be walking that list. We need to make sure
-	 * the ops->next pointer is valid before another CPU sees
-	 * the ops pointer included into the ftrace_list.
+	 * If there's only one function registered, then call that
+	 * function directly. Otherwise, we need to iterate over the
+	 * registered callers.
 	 */
-	rcu_assign_pointer(ftrace_list, ops);
+	if (ftrace_global_list == &ftrace_list_end ||
+	    ftrace_global_list->next == &ftrace_list_end)
+		func = ftrace_global_list->func;
+	else
+		func = ftrace_global_list_func;
 
-	if (ftrace_enabled) {
-		ftrace_func_t func;
+	/* If we filter on pids, update to use the pid function */
+	if (!list_empty(&ftrace_pids)) {
+		set_ftrace_pid_function(func);
+		func = ftrace_pid_func;
+	}
 
-		if (ops->next == &ftrace_list_end)
-			func = ops->func;
-		else
-			func = ftrace_list_func;
+	global_ops.func = func;
+}
 
-		if (!list_empty(&ftrace_pids)) {
-			set_ftrace_pid_function(func);
-			func = ftrace_pid_func;
-		}
+static void update_ftrace_function(void)
+{
+	ftrace_func_t func;
+
+	update_global_ops();
+
+	/*
+	 * If we are at the end of the list and this ops is
+	 * not dynamic, then have the mcount trampoline call
+	 * the function directly
+	 */
+	if (ftrace_ops_list == &ftrace_list_end ||
+	    (ftrace_ops_list->next == &ftrace_list_end &&
+	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
+		func = ftrace_ops_list->func;
+	else
+		func = ftrace_ops_list_func;
 
-	/*
-	 * For one func, simply call it directly.
-	 * For more than one func, call the chain.
-	 */
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	ftrace_trace_function = func;
 #else
 	__ftrace_trace_function = func;
 	ftrace_trace_function = ftrace_test_stop_func;
 #endif
-	}
+}
 
-	return 0;
+static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
+{
+	ops->next = *list;
+	/*
+	 * We are entering ops into the list but another
+	 * CPU might be walking that list. We need to make sure
+	 * the ops->next pointer is valid before another CPU sees
+	 * the ops pointer included into the list.
+	 */
+	rcu_assign_pointer(*list, ops);
 }
 
-static int __unregister_ftrace_function(struct ftrace_ops *ops)
+static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 {
 	struct ftrace_ops **p;
 
@@ -194,13 +235,12 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	 * If we are removing the last function, then simply point
 	 * to the ftrace_stub.
 	 */
-	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
-		ftrace_trace_function = ftrace_stub;
-		ftrace_list = &ftrace_list_end;
+	if (*list == ops && ops->next == &ftrace_list_end) {
+		*list = &ftrace_list_end;
 		return 0;
 	}
 
-	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
+	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
 		if (*p == ops)
 			break;
 
@@ -208,53 +248,83 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 		return -1;
 
 	*p = (*p)->next;
+	return 0;
+}
 
-	if (ftrace_enabled) {
-		/* If we only have one func left, then call that directly */
-		if (ftrace_list->next == &ftrace_list_end) {
-			ftrace_func_t func = ftrace_list->func;
+static int __register_ftrace_function(struct ftrace_ops *ops)
+{
+	if (ftrace_disabled)
+		return -ENODEV;
 
-			if (!list_empty(&ftrace_pids)) {
-				set_ftrace_pid_function(func);
-				func = ftrace_pid_func;
-			}
-#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-			ftrace_trace_function = func;
-#else
-			__ftrace_trace_function = func;
-#endif
-		}
-	}
+	if (FTRACE_WARN_ON(ops == &global_ops))
+		return -EINVAL;
+
+	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
+		return -EBUSY;
+
+	if (!core_kernel_data((unsigned long)ops))
+		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
+
+	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
+		int first = ftrace_global_list == &ftrace_list_end;
+		add_ftrace_ops(&ftrace_global_list, ops);
+		ops->flags |= FTRACE_OPS_FL_ENABLED;
+		if (first)
+			add_ftrace_ops(&ftrace_ops_list, &global_ops);
+	} else
+		add_ftrace_ops(&ftrace_ops_list, ops);
+
+	if (ftrace_enabled)
+		update_ftrace_function();
 
 	return 0;
 }
 
-static void ftrace_update_pid_func(void)
+static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
-	ftrace_func_t func;
+	int ret;
 
-	if (ftrace_trace_function == ftrace_stub)
-		return;
+	if (ftrace_disabled)
+		return -ENODEV;
 
-#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	func = ftrace_trace_function;
-#else
-	func = __ftrace_trace_function;
-#endif
+	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
+		return -EBUSY;
 
-	if (!list_empty(&ftrace_pids)) {
-		set_ftrace_pid_function(func);
-		func = ftrace_pid_func;
-	} else {
-		if (func == ftrace_pid_func)
-			func = ftrace_pid_function;
-	}
+	if (FTRACE_WARN_ON(ops == &global_ops))
+		return -EINVAL;
 
-#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	ftrace_trace_function = func;
-#else
-	__ftrace_trace_function = func;
-#endif
+	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
+		ret = remove_ftrace_ops(&ftrace_global_list, ops);
+		if (!ret && ftrace_global_list == &ftrace_list_end)
+			ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
+		if (!ret)
+			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+	} else
+		ret = remove_ftrace_ops(&ftrace_ops_list, ops);
+
+	if (ret < 0)
+		return ret;
+
+	if (ftrace_enabled)
+		update_ftrace_function();
+
+	/*
+	 * Dynamic ops may be freed, we must make sure that all
+	 * callers are done before leaving this function.
+	 */
+	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
+		synchronize_sched();
+
+	return 0;
+}
+
+static void ftrace_update_pid_func(void)
+{
+	/* Only do something if we are tracing something */
+	if (ftrace_trace_function == ftrace_stub)
+		return;
+
+	update_ftrace_function();
 }
 
 #ifdef CONFIG_FUNCTION_PROFILER
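[annotation — illustrative example, not part of the patch]
__unregister_ftrace_function() ends with synchronize_sched() for
FTRACE_OPS_FL_DYNAMIC ops because the list is walked without locks: removal
only unlinks the entry, and some CPU may still be executing ops->func. Freeing
is only safe after a grace period. The two-phase shape, with a hypothetical
wait_for_readers() standing in for synchronize_sched():

    #include <stdlib.h>

    struct ops {
        struct ops *next;
        void (*func)(void);
    };

    /* hypothetical grace-period wait, cf. synchronize_sched() */
    extern void wait_for_readers(void);

    static void unregister_and_free(struct ops **list, struct ops *ops)
    {
        struct ops **p;

        for (p = list; *p; p = &(*p)->next) {
            if (*p == ops) {
                *p = ops->next;   /* unlink: new walkers skip it */
                break;
            }
        }

        wait_for_readers();       /* every pre-existing walker is done */
        free(ops);                /* only now can the memory be reused */
    }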
@@ -800,6 +870,7 @@ static const struct file_operations ftrace_profile_fops = {
 	.open		= tracing_open_generic,
 	.read		= ftrace_profile_read,
 	.write		= ftrace_profile_write,
+	.llseek		= default_llseek,
 };
 
 /* used to initialize the real stat files */
@@ -884,13 +955,38 @@ enum {
 	FTRACE_ENABLE_CALLS		= (1 << 0),
 	FTRACE_DISABLE_CALLS		= (1 << 1),
 	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
-	FTRACE_ENABLE_MCOUNT		= (1 << 3),
-	FTRACE_DISABLE_MCOUNT		= (1 << 4),
-	FTRACE_START_FUNC_RET		= (1 << 5),
-	FTRACE_STOP_FUNC_RET		= (1 << 6),
+	FTRACE_START_FUNC_RET		= (1 << 3),
+	FTRACE_STOP_FUNC_RET		= (1 << 4),
+};
+struct ftrace_func_entry {
+	struct hlist_node hlist;
+	unsigned long ip;
+};
+
+struct ftrace_hash {
+	unsigned long		size_bits;
+	struct hlist_head	*buckets;
+	unsigned long		count;
+	struct rcu_head		rcu;
+};
+
+/*
+ * We make these constant because no one should touch them,
+ * but they are used as the default "empty hash", to avoid allocating
+ * it all the time. These are in a read only section such that if
+ * anyone does try to modify it, it will cause an exception.
+ */
+static const struct hlist_head empty_buckets[1];
+static const struct ftrace_hash empty_hash = {
+	.buckets = (struct hlist_head *)empty_buckets,
 };
+#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
 
-static int ftrace_filtered;
+static struct ftrace_ops global_ops = {
+	.func			= ftrace_stub,
+	.notrace_hash		= EMPTY_HASH,
+	.filter_hash		= EMPTY_HASH,
+};
 
 static struct dyn_ftrace *ftrace_new_addrs;
 
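[annotation — illustrative example, not part of the patch]
struct ftrace_hash buckets chains of ftrace_func_entry keyed by instruction
pointer; size_bits selects one of 2^size_bits buckets via hash_long(), and
EMPTY_HASH is a shared read-only instance so unfiltered ops never allocate.
A self-contained sketch of the lookup shape (toy hash function, assumes a
64-bit unsigned long):

    #include <stddef.h>

    struct entry {
        struct entry *next;
        unsigned long ip;
    };

    struct hash {
        unsigned long size_bits;
        struct entry **buckets;    /* 1 << size_bits chain heads */
        unsigned long count;
    };

    /* toy stand-in for the kernel's hash_long() */
    static unsigned long hash_ip(unsigned long ip, int bits)
    {
        return (ip * 0x9e3779b97f4a7c15UL) >> (64 - bits);
    }

    static struct entry *lookup_ip(struct hash *hash, unsigned long ip)
    {
        unsigned long key = hash->size_bits ? hash_ip(ip, hash->size_bits) : 0;
        struct entry *e;

        if (!hash->count)
            return NULL;
        for (e = hash->buckets[key]; e; e = e->next)
            if (e->ip == ip)
                return e;
        return NULL;
    }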
@@ -913,6 +1009,269 @@ static struct ftrace_page	*ftrace_pages;
 
 static struct dyn_ftrace *ftrace_free_records;
 
+static struct ftrace_func_entry *
+ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
+{
+	unsigned long key;
+	struct ftrace_func_entry *entry;
+	struct hlist_head *hhd;
+	struct hlist_node *n;
+
+	if (!hash->count)
+		return NULL;
+
+	if (hash->size_bits > 0)
+		key = hash_long(ip, hash->size_bits);
+	else
+		key = 0;
+
+	hhd = &hash->buckets[key];
+
+	hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
+		if (entry->ip == ip)
+			return entry;
+	}
+	return NULL;
+}
+
+static void __add_hash_entry(struct ftrace_hash *hash,
+			     struct ftrace_func_entry *entry)
+{
+	struct hlist_head *hhd;
+	unsigned long key;
+
+	if (hash->size_bits)
+		key = hash_long(entry->ip, hash->size_bits);
+	else
+		key = 0;
+
+	hhd = &hash->buckets[key];
+	hlist_add_head(&entry->hlist, hhd);
+	hash->count++;
+}
+
+static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
+{
+	struct ftrace_func_entry *entry;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->ip = ip;
+	__add_hash_entry(hash, entry);
+
+	return 0;
+}
+
+static void
+free_hash_entry(struct ftrace_hash *hash,
+		struct ftrace_func_entry *entry)
+{
+	hlist_del(&entry->hlist);
+	kfree(entry);
+	hash->count--;
+}
+
+static void
+remove_hash_entry(struct ftrace_hash *hash,
+		  struct ftrace_func_entry *entry)
+{
+	hlist_del(&entry->hlist);
+	hash->count--;
+}
+
+static void ftrace_hash_clear(struct ftrace_hash *hash)
+{
+	struct hlist_head *hhd;
+	struct hlist_node *tp, *tn;
+	struct ftrace_func_entry *entry;
+	int size = 1 << hash->size_bits;
+	int i;
+
+	if (!hash->count)
+		return;
+
+	for (i = 0; i < size; i++) {
+		hhd = &hash->buckets[i];
+		hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
+			free_hash_entry(hash, entry);
+	}
+	FTRACE_WARN_ON(hash->count);
+}
+
+static void free_ftrace_hash(struct ftrace_hash *hash)
+{
+	if (!hash || hash == EMPTY_HASH)
+		return;
+	ftrace_hash_clear(hash);
+	kfree(hash->buckets);
+	kfree(hash);
+}
+
+static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
+{
+	struct ftrace_hash *hash;
+
+	hash = container_of(rcu, struct ftrace_hash, rcu);
+	free_ftrace_hash(hash);
+}
+
+static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
+{
+	if (!hash || hash == EMPTY_HASH)
+		return;
+	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
+}
+
+static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
+{
+	struct ftrace_hash *hash;
+	int size;
+
+	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
+	if (!hash)
+		return NULL;
+
+	size = 1 << size_bits;
+	hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
+
+	if (!hash->buckets) {
+		kfree(hash);
+		return NULL;
+	}
+
+	hash->size_bits = size_bits;
+
+	return hash;
+}
+
+static struct ftrace_hash *
+alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
+{
+	struct ftrace_func_entry *entry;
+	struct ftrace_hash *new_hash;
+	struct hlist_node *tp;
+	int size;
+	int ret;
+	int i;
+
+	new_hash = alloc_ftrace_hash(size_bits);
+	if (!new_hash)
+		return NULL;
+
+	/* Empty hash? */
+	if (!hash || !hash->count)
+		return new_hash;
+
+	size = 1 << hash->size_bits;
+	for (i = 0; i < size; i++) {
+		hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
+			ret = add_hash_entry(new_hash, entry->ip);
+			if (ret < 0)
+				goto free_hash;
+		}
+	}
+
+	FTRACE_WARN_ON(new_hash->count != hash->count);
+
+	return new_hash;
+
+ free_hash:
+	free_ftrace_hash(new_hash);
+	return NULL;
+}
+
+static int
+ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
+{
+	struct ftrace_func_entry *entry;
+	struct hlist_node *tp, *tn;
+	struct hlist_head *hhd;
+	struct ftrace_hash *old_hash;
+	struct ftrace_hash *new_hash;
+	unsigned long key;
+	int size = src->count;
+	int bits = 0;
+	int i;
+
+	/*
+	 * If the new source is empty, just free dst and assign it
+	 * the empty_hash.
+	 */
+	if (!src->count) {
+		free_ftrace_hash_rcu(*dst);
+		rcu_assign_pointer(*dst, EMPTY_HASH);
+		return 0;
+	}
+
+	/*
+	 * Make the hash size about 1/2 the # found
+	 */
+	for (size /= 2; size; size >>= 1)
+		bits++;
+
+	/* Don't allocate too much */
+	if (bits > FTRACE_HASH_MAX_BITS)
+		bits = FTRACE_HASH_MAX_BITS;
+
+	new_hash = alloc_ftrace_hash(bits);
+	if (!new_hash)
+		return -ENOMEM;
+
+	size = 1 << src->size_bits;
+	for (i = 0; i < size; i++) {
+		hhd = &src->buckets[i];
+		hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
+			if (bits > 0)
+				key = hash_long(entry->ip, bits);
+			else
+				key = 0;
+			remove_hash_entry(src, entry);
+			__add_hash_entry(new_hash, entry);
+		}
+	}
+
+	old_hash = *dst;
+	rcu_assign_pointer(*dst, new_hash);
+	free_ftrace_hash_rcu(old_hash);
+
+	return 0;
+}
+
+/*
+ * Test the hashes for this ops to see if we want to call
+ * the ops->func or not.
+ *
+ * It's a match if the ip is in the ops->filter_hash or
+ * the filter_hash does not exist or is empty,
+ * AND
+ * the ip is not in the ops->notrace_hash.
+ *
+ * This needs to be called with preemption disabled as
+ * the hashes are freed with call_rcu_sched().
+ */
+static int
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+{
+	struct ftrace_hash *filter_hash;
+	struct ftrace_hash *notrace_hash;
+	int ret;
+
+	filter_hash = rcu_dereference_raw(ops->filter_hash);
+	notrace_hash = rcu_dereference_raw(ops->notrace_hash);
+
+	if ((!filter_hash || !filter_hash->count ||
+	     ftrace_lookup_ip(filter_hash, ip)) &&
+	    (!notrace_hash || !notrace_hash->count ||
+	     !ftrace_lookup_ip(notrace_hash, ip)))
+		ret = 1;
+	else
+		ret = 0;
+
+	return ret;
+}
+
 /*
  * This is a double for. Do not use 'break' to break out of the loop,
  * you must use a goto.
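[annotation — illustrative example, not part of the patch]
ftrace_ops_test() above is the per-call predicate: an ip is traced when the
filter hash is empty or contains it, AND the notrace hash does not contain it.
Reduced to its core (hash_contains() is a hypothetical membership test in place
of ftrace_lookup_ip()):

    struct hash_view {
        unsigned long count;        /* number of entries */
    };

    extern int hash_contains(struct hash_view *hash, unsigned long ip);

    static int ops_wants_ip(struct hash_view *filter,
                            struct hash_view *notrace,
                            unsigned long ip)
    {
        int filtered_in = !filter || !filter->count ||
                          hash_contains(filter, ip);
        int noted_out   = notrace && notrace->count &&
                          hash_contains(notrace, ip);

        return filtered_in && !noted_out;
    }

Also worth noting the sizing rule in ftrace_hash_move(): the loop
"for (size /= 2; size; size >>= 1) bits++;" picks roughly half as many buckets
as entries (e.g. 1000 entries gives bits = 9, i.e. 512 buckets), capped at
FTRACE_HASH_MAX_BITS.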
@@ -927,6 +1286,105 @@ static struct dyn_ftrace *ftrace_free_records;
 		}				\
 	}
 
+static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
+				     int filter_hash,
+				     bool inc)
+{
+	struct ftrace_hash *hash;
+	struct ftrace_hash *other_hash;
+	struct ftrace_page *pg;
+	struct dyn_ftrace *rec;
+	int count = 0;
+	int all = 0;
+
+	/* Only update if the ops has been registered */
+	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+		return;
+
+	/*
+	 * In the filter_hash case:
+	 *   If the count is zero, we update all records.
+	 *   Otherwise we just update the items in the hash.
+	 *
+	 * In the notrace_hash case:
+	 *   We enable the update in the hash.
+	 *   As disabling notrace means enabling the tracing,
+	 *   and enabling notrace means disabling, the inc variable
+	 *   gets inversed.
+	 */
+	if (filter_hash) {
+		hash = ops->filter_hash;
+		other_hash = ops->notrace_hash;
+		if (!hash || !hash->count)
+			all = 1;
+	} else {
+		inc = !inc;
+		hash = ops->notrace_hash;
+		other_hash = ops->filter_hash;
+		/*
+		 * If the notrace hash has no items,
+		 * then there's nothing to do.
+		 */
+		if (hash && !hash->count)
+			return;
+	}
+
+	do_for_each_ftrace_rec(pg, rec) {
+		int in_other_hash = 0;
+		int in_hash = 0;
+		int match = 0;
+
+		if (all) {
+			/*
+			 * Only the filter_hash affects all records.
+			 * Update if the record is not in the notrace hash.
+			 */
+			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
+				match = 1;
+		} else {
+			in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
+			in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);
+
+			/*
+			 *
+			 */
+			if (filter_hash && in_hash && !in_other_hash)
+				match = 1;
+			else if (!filter_hash && in_hash &&
+				 (in_other_hash || !other_hash->count))
+				match = 1;
+		}
+		if (!match)
+			continue;
+
+		if (inc) {
+			rec->flags++;
+			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
+				return;
+		} else {
+			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
+				return;
+			rec->flags--;
+		}
+		count++;
+		/* Shortcut, if we handled all records, we are done. */
+		if (!all && count == hash->count)
+			return;
+	} while_for_each_ftrace_rec();
+}
+
+static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
+				    int filter_hash)
+{
+	__ftrace_hash_rec_update(ops, filter_hash, 0);
+}
+
+static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
+				   int filter_hash)
+{
+	__ftrace_hash_rec_update(ops, filter_hash, 1);
+}
+
 static void ftrace_free_rec(struct dyn_ftrace *rec)
 {
 	rec->freelist = ftrace_free_records;
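[annotation — illustrative example, not part of the patch]
__ftrace_hash_rec_update() turns the low bits of rec->flags into a reference
count: every registered ops that wants a function increments it, and
__ftrace_replace_code() (next hunk) enables the record exactly when that count
is non-zero. The inc/dec discipline, isolated below; the mask and limit values
here are placeholders, not the kernel's:

    #define FL_MASK   (3UL << 30)          /* hypothetical control bits */
    #define REF_MAX   ((1UL << 30) - 1)    /* hypothetical counter limit */

    static int rec_ref_update(unsigned long *flags, int inc)
    {
        unsigned long ref = *flags & ~FL_MASK;

        if (inc) {
            if (ref == REF_MAX)
                return -1;     /* overflow: leave the record alone */
            (*flags)++;
        } else {
            if (ref == 0)
                return -1;     /* underflow: already unused */
            (*flags)--;
        }
        return 0;
    }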
@@ -1048,18 +1506,18 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 	ftrace_addr = (unsigned long)FTRACE_ADDR;
 
 	/*
-	 * If this record is not to be traced or we want to disable it,
-	 * then disable it.
+	 * If we are enabling tracing:
 	 *
-	 * If we want to enable it and filtering is off, then enable it.
+	 *   If the record has a ref count, then we need to enable it
+	 *   because someone is using it.
 	 *
-	 * If we want to enable it and filtering is on, enable it only if
-	 * it's filtered
+	 *   Otherwise we make sure its disabled.
+	 *
+	 * If we are disabling tracing, then disable all records that
+	 * are enabled.
 	 */
-	if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
-		if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
-			flag = FTRACE_FL_ENABLED;
-	}
+	if (enable && (rec->flags & ~FTRACE_FL_MASK))
+		flag = FTRACE_FL_ENABLED;
 
 	/* If the state of this record hasn't changed, then do nothing */
 	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
@@ -1080,19 +1538,16 @@ static void ftrace_replace_code(int enable)
 	struct ftrace_page *pg;
 	int failed;
 
+	if (unlikely(ftrace_disabled))
+		return;
+
 	do_for_each_ftrace_rec(pg, rec) {
-		/*
-		 * Skip over free records, records that have
-		 * failed and not converted.
-		 */
-		if (rec->flags & FTRACE_FL_FREE ||
-		    rec->flags & FTRACE_FL_FAILED ||
-		    !(rec->flags & FTRACE_FL_CONVERTED))
+		/* Skip over free records */
+		if (rec->flags & FTRACE_FL_FREE)
 			continue;
 
 		failed = __ftrace_replace_code(rec, enable);
 		if (failed) {
-			rec->flags |= FTRACE_FL_FAILED;
 			ftrace_bug(failed, rec->ip);
 			/* Stop processing */
 			return;
@@ -1108,10 +1563,12 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
 
 	ip = rec->ip;
 
+	if (unlikely(ftrace_disabled))
+		return 0;
+
 	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
 	if (ret) {
 		ftrace_bug(ret, ip);
-		rec->flags |= FTRACE_FL_FAILED;
 		return 0;
 	}
 	return 1;
@@ -1172,6 +1629,7 @@ static void ftrace_run_update_code(int command)
 
 static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
+static int global_start_up;
 
 static void ftrace_startup_enable(int command)
 {
@@ -1186,19 +1644,38 @@ static void ftrace_startup_enable(int command)
 	ftrace_run_update_code(command);
 }
 
-static void ftrace_startup(int command)
+static int ftrace_startup(struct ftrace_ops *ops, int command)
 {
+	bool hash_enable = true;
+
 	if (unlikely(ftrace_disabled))
-		return;
+		return -ENODEV;
 
 	ftrace_start_up++;
 	command |= FTRACE_ENABLE_CALLS;
 
+	/* ops marked global share the filter hashes */
+	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
+		ops = &global_ops;
+		/* Don't update hash if global is already set */
+		if (global_start_up)
+			hash_enable = false;
+		global_start_up++;
+	}
+
+	ops->flags |= FTRACE_OPS_FL_ENABLED;
+	if (hash_enable)
+		ftrace_hash_rec_enable(ops, 1);
+
 	ftrace_startup_enable(command);
+
+	return 0;
 }
 
-static void ftrace_shutdown(int command)
+static void ftrace_shutdown(struct ftrace_ops *ops, int command)
 {
+	bool hash_disable = true;
+
 	if (unlikely(ftrace_disabled))
 		return;
 
@@ -1210,6 +1687,23 @@ static void ftrace_shutdown(int command)
 	 */
 	WARN_ON_ONCE(ftrace_start_up < 0);
 
+	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
+		ops = &global_ops;
+		global_start_up--;
+		WARN_ON_ONCE(global_start_up < 0);
+		/* Don't update hash if global still has users */
+		if (global_start_up) {
+			WARN_ON_ONCE(!ftrace_start_up);
+			hash_disable = false;
+		}
+	}
+
+	if (hash_disable)
+		ftrace_hash_rec_disable(ops, 1);
+
+	if (ops != &global_ops || !global_start_up)
+		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+
 	if (!ftrace_start_up)
 		command |= FTRACE_DISABLE_CALLS;
 
@@ -1226,8 +1720,6 @@ static void ftrace_shutdown(int command)
 
 static void ftrace_startup_sysctl(void)
 {
-	int command = FTRACE_ENABLE_MCOUNT;
-
 	if (unlikely(ftrace_disabled))
 		return;
 
@@ -1235,23 +1727,17 @@ static void ftrace_startup_sysctl(void)
 	saved_ftrace_func = NULL;
 	/* ftrace_start_up is true if we want ftrace running */
 	if (ftrace_start_up)
-		command |= FTRACE_ENABLE_CALLS;
-
-	ftrace_run_update_code(command);
+		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
 }
 
 static void ftrace_shutdown_sysctl(void)
 {
-	int command = FTRACE_DISABLE_MCOUNT;
-
 	if (unlikely(ftrace_disabled))
 		return;
 
 	/* ftrace_start_up is true if ftrace is running */
 	if (ftrace_start_up)
-		command |= FTRACE_DISABLE_CALLS;
-
-	ftrace_run_update_code(command);
+		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
 }
 
 static cycle_t		ftrace_update_time;
@@ -1277,15 +1763,15 @@ static int ftrace_update_code(struct module *mod)
 		p->flags = 0L;
 
 		/*
-		 * Do the initial record convertion from mcount jump
+		 * Do the initial record conversion from mcount jump
 		 * to the NOP instructions.
 		 */
 		if (!ftrace_code_disable(mod, p)) {
 			ftrace_free_rec(p);
-			continue;
+			/* Game over */
+			break;
 		}
 
-		p->flags |= FTRACE_FL_CONVERTED;
 		ftrace_update_cnt++;
 
 		/*
@@ -1360,32 +1846,39 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
 enum {
 	FTRACE_ITER_FILTER	= (1 << 0),
 	FTRACE_ITER_NOTRACE	= (1 << 1),
-	FTRACE_ITER_FAILURES	= (1 << 2),
-	FTRACE_ITER_PRINTALL	= (1 << 3),
-	FTRACE_ITER_HASH	= (1 << 4),
+	FTRACE_ITER_PRINTALL	= (1 << 2),
+	FTRACE_ITER_HASH	= (1 << 3),
+	FTRACE_ITER_ENABLED	= (1 << 4),
 };
 
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
 
 struct ftrace_iterator {
-	struct ftrace_page	*pg;
-	int			hidx;
-	int			idx;
-	unsigned		flags;
-	struct trace_parser	parser;
+	loff_t				pos;
+	loff_t				func_pos;
+	struct ftrace_page		*pg;
+	struct dyn_ftrace		*func;
+	struct ftrace_func_probe	*probe;
+	struct trace_parser		parser;
+	struct ftrace_hash		*hash;
+	struct ftrace_ops		*ops;
+	int				hidx;
+	int				idx;
+	unsigned			flags;
 };
 
 static void *
-t_hash_next(struct seq_file *m, void *v, loff_t *pos)
+t_hash_next(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
-	struct hlist_node *hnd = v;
+	struct hlist_node *hnd = NULL;
 	struct hlist_head *hhd;
 
-	WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
-
 	(*pos)++;
+	iter->pos = *pos;
 
+	if (iter->probe)
+		hnd = &iter->probe->node;
 retry:
 	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
 		return NULL;
@@ -1408,7 +1901,12 @@ t_hash_next(struct seq_file *m, void *v, loff_t *pos)
 		}
 	}
 
-	return hnd;
+	if (WARN_ON_ONCE(!hnd))
+		return NULL;
+
+	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
+
+	return iter;
 }
 
 static void *t_hash_start(struct seq_file *m, loff_t *pos)
@@ -1417,26 +1915,32 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
 	void *p = NULL;
 	loff_t l;
 
-	if (!(iter->flags & FTRACE_ITER_HASH))
-		*pos = 0;
-
-	iter->flags |= FTRACE_ITER_HASH;
+	if (iter->func_pos > *pos)
+		return NULL;
 
 	iter->hidx = 0;
-	for (l = 0; l <= *pos; ) {
-		p = t_hash_next(m, p, &l);
+	for (l = 0; l <= (*pos - iter->func_pos); ) {
+		p = t_hash_next(m, &l);
 		if (!p)
 			break;
 	}
-	return p;
+	if (!p)
+		return NULL;
+
+	/* Only set this if we have an item */
+	iter->flags |= FTRACE_ITER_HASH;
+
+	return iter;
 }
 
-static int t_hash_show(struct seq_file *m, void *v)
+static int
+t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
 {
 	struct ftrace_func_probe *rec;
-	struct hlist_node *hnd = v;
 
-	rec = hlist_entry(hnd, struct ftrace_func_probe, node);
+	rec = iter->probe;
+	if (WARN_ON_ONCE(!rec))
+		return -EIO;
 
 	if (rec->ops->print)
 		return rec->ops->print(m, rec->ip, rec->ops, rec->data);
@@ -1454,15 +1958,20 @@ static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
+	struct ftrace_ops *ops = &global_ops;
 	struct dyn_ftrace *rec = NULL;
 
+	if (unlikely(ftrace_disabled))
+		return NULL;
+
 	if (iter->flags & FTRACE_ITER_HASH)
-		return t_hash_next(m, v, pos);
+		return t_hash_next(m, pos);
 
 	(*pos)++;
+	iter->pos = iter->func_pos = *pos;
 
 	if (iter->flags & FTRACE_ITER_PRINTALL)
-		return NULL;
+		return t_hash_start(m, pos);
 
 retry:
 	if (iter->idx >= iter->pg->index) {
@@ -1475,38 +1984,59 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 		rec = &iter->pg->records[iter->idx++];
 		if ((rec->flags & FTRACE_FL_FREE) ||
 
-		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
-		     (rec->flags & FTRACE_FL_FAILED)) ||
-
-		    ((iter->flags & FTRACE_ITER_FAILURES) &&
-		     !(rec->flags & FTRACE_FL_FAILED)) ||
-
 		    ((iter->flags & FTRACE_ITER_FILTER) &&
-		     !(rec->flags & FTRACE_FL_FILTER)) ||
+		     !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
 
 		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
-		     !(rec->flags & FTRACE_FL_NOTRACE))) {
+		     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
+
+		    ((iter->flags & FTRACE_ITER_ENABLED) &&
+		     !(rec->flags & ~FTRACE_FL_MASK))) {
+
 			rec = NULL;
 			goto retry;
 		}
 	}
 
-	return rec;
+	if (!rec)
+		return t_hash_start(m, pos);
+
+	iter->func = rec;
+
+	return iter;
+}
+
+static void reset_iter_read(struct ftrace_iterator *iter)
+{
+	iter->pos = 0;
+	iter->func_pos = 0;
+	iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
+	struct ftrace_ops *ops = &global_ops;
 	void *p = NULL;
 	loff_t l;
 
 	mutex_lock(&ftrace_lock);
+
+	if (unlikely(ftrace_disabled))
+		return NULL;
+
+	/*
+	 * If an lseek was done, then reset and start from beginning.
+	 */
+	if (*pos < iter->pos)
+		reset_iter_read(iter);
+
 	/*
 	 * For set_ftrace_filter reading, if we have the filter
 	 * off, we can short cut and just print out that all
 	 * functions are enabled.
 	 */
-	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
+	if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
 		if (*pos > 0)
 			return t_hash_start(m, pos);
 		iter->flags |= FTRACE_ITER_PRINTALL;
@@ -1518,6 +2048,11 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	if (iter->flags & FTRACE_ITER_HASH)
 		return t_hash_start(m, pos);
 
+	/*
+	 * Unfortunately, we need to restart at ftrace_pages_start
+	 * every time we let go of the ftrace_mutex. This is because
+	 * those pointers can change without the lock.
+	 */
 	iter->pg = ftrace_pages_start;
 	iter->idx = 0;
 	for (l = 0; l <= *pos; ) {
@@ -1526,10 +2061,14 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 			break;
 	}
 
-	if (!p && iter->flags & FTRACE_ITER_FILTER)
-		return t_hash_start(m, pos);
+	if (!p) {
+		if (iter->flags & FTRACE_ITER_FILTER)
+			return t_hash_start(m, pos);
 
-	return p;
+		return NULL;
+	}
+
+	return iter;
 }
 
 static void t_stop(struct seq_file *m, void *p)
@@ -1540,20 +2079,26 @@ static void t_stop(struct seq_file *m, void *p)
 static int t_show(struct seq_file *m, void *v)
 {
 	struct ftrace_iterator *iter = m->private;
-	struct dyn_ftrace *rec = v;
+	struct dyn_ftrace *rec;
 
 	if (iter->flags & FTRACE_ITER_HASH)
-		return t_hash_show(m, v);
+		return t_hash_show(m, iter);
 
 	if (iter->flags & FTRACE_ITER_PRINTALL) {
 		seq_printf(m, "#### all functions enabled ####\n");
 		return 0;
 	}
 
+	rec = iter->func;
+
 	if (!rec)
 		return 0;
 
-	seq_printf(m, "%ps\n", (void *)rec->ip);
+	seq_printf(m, "%ps", (void *)rec->ip);
+	if (iter->flags & FTRACE_ITER_ENABLED)
+		seq_printf(m, " (%ld)",
+			   rec->flags & ~FTRACE_FL_MASK);
+	seq_printf(m, "\n");
 
 	return 0;
 }
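[annotation — not part of the patch]
With FTRACE_ITER_ENABLED set, t_show() prints each function followed by its
reference count, "%ps (%ld)". This feeds the new ftrace_enabled_open() in the
next hunk, so reading the file it backs would produce lines shaped like the
following (function names invented for illustration):

    schedule (1)
    do_fork (2)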
@@ -1593,44 +2138,46 @@ ftrace_avail_open(struct inode *inode, struct file *file)
1593} 2138}
1594 2139
1595static int 2140static int
1596ftrace_failures_open(struct inode *inode, struct file *file) 2141ftrace_enabled_open(struct inode *inode, struct file *file)
1597{ 2142{
1598 int ret;
1599 struct seq_file *m;
1600 struct ftrace_iterator *iter; 2143 struct ftrace_iterator *iter;
2144 int ret;
2145
2146 if (unlikely(ftrace_disabled))
2147 return -ENODEV;
2148
2149 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2150 if (!iter)
2151 return -ENOMEM;
2152
2153 iter->pg = ftrace_pages_start;
2154 iter->flags = FTRACE_ITER_ENABLED;
1601 2155
1602 ret = ftrace_avail_open(inode, file); 2156 ret = seq_open(file, &show_ftrace_seq_ops);
1603 if (!ret) { 2157 if (!ret) {
1604 m = (struct seq_file *)file->private_data; 2158 struct seq_file *m = file->private_data;
1605 iter = (struct ftrace_iterator *)m->private; 2159
1606 iter->flags = FTRACE_ITER_FAILURES; 2160 m->private = iter;
2161 } else {
2162 kfree(iter);
1607 } 2163 }
1608 2164
1609 return ret; 2165 return ret;
1610} 2166}
1611 2167
1612 2168static void ftrace_filter_reset(struct ftrace_hash *hash)
1613static void ftrace_filter_reset(int enable)
1614{ 2169{
1615 struct ftrace_page *pg;
1616 struct dyn_ftrace *rec;
1617 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1618
1619 mutex_lock(&ftrace_lock); 2170 mutex_lock(&ftrace_lock);
1620 if (enable) 2171 ftrace_hash_clear(hash);
1621 ftrace_filtered = 0;
1622 do_for_each_ftrace_rec(pg, rec) {
1623 if (rec->flags & FTRACE_FL_FAILED)
1624 continue;
1625 rec->flags &= ~type;
1626 } while_for_each_ftrace_rec();
1627 mutex_unlock(&ftrace_lock); 2172 mutex_unlock(&ftrace_lock);
1628} 2173}
1629 2174
1630static int 2175static int
1631ftrace_regex_open(struct inode *inode, struct file *file, int enable) 2176ftrace_regex_open(struct ftrace_ops *ops, int flag,
2177 struct inode *inode, struct file *file)
1632{ 2178{
1633 struct ftrace_iterator *iter; 2179 struct ftrace_iterator *iter;
2180 struct ftrace_hash *hash;
1634 int ret = 0; 2181 int ret = 0;
1635 2182
1636 if (unlikely(ftrace_disabled)) 2183 if (unlikely(ftrace_disabled))
@@ -1645,21 +2192,42 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1645 return -ENOMEM; 2192 return -ENOMEM;
1646 } 2193 }
1647 2194
2195 if (flag & FTRACE_ITER_NOTRACE)
2196 hash = ops->notrace_hash;
2197 else
2198 hash = ops->filter_hash;
2199
2200 iter->ops = ops;
2201 iter->flags = flag;
2202
2203 if (file->f_mode & FMODE_WRITE) {
2204 mutex_lock(&ftrace_lock);
2205 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2206 mutex_unlock(&ftrace_lock);
2207
2208 if (!iter->hash) {
2209 trace_parser_put(&iter->parser);
2210 kfree(iter);
2211 return -ENOMEM;
2212 }
2213 }
2214
1648 mutex_lock(&ftrace_regex_lock); 2215 mutex_lock(&ftrace_regex_lock);
2216
1649 if ((file->f_mode & FMODE_WRITE) && 2217 if ((file->f_mode & FMODE_WRITE) &&
1650 (file->f_flags & O_TRUNC)) 2218 (file->f_flags & O_TRUNC))
1651 ftrace_filter_reset(enable); 2219 ftrace_filter_reset(iter->hash);
1652 2220
1653 if (file->f_mode & FMODE_READ) { 2221 if (file->f_mode & FMODE_READ) {
1654 iter->pg = ftrace_pages_start; 2222 iter->pg = ftrace_pages_start;
1655 iter->flags = enable ? FTRACE_ITER_FILTER :
1656 FTRACE_ITER_NOTRACE;
1657 2223
1658 ret = seq_open(file, &show_ftrace_seq_ops); 2224 ret = seq_open(file, &show_ftrace_seq_ops);
1659 if (!ret) { 2225 if (!ret) {
1660 struct seq_file *m = file->private_data; 2226 struct seq_file *m = file->private_data;
1661 m->private = iter; 2227 m->private = iter;
1662 } else { 2228 } else {
2229 /* Failed */
2230 free_ftrace_hash(iter->hash);
1663 trace_parser_put(&iter->parser); 2231 trace_parser_put(&iter->parser);
1664 kfree(iter); 2232 kfree(iter);
1665 } 2233 }
@@ -1673,13 +2241,15 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1673static int 2241static int
1674ftrace_filter_open(struct inode *inode, struct file *file) 2242ftrace_filter_open(struct inode *inode, struct file *file)
1675{ 2243{
1676 return ftrace_regex_open(inode, file, 1); 2244 return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
2245 inode, file);
1677} 2246}
1678 2247
1679static int 2248static int
1680ftrace_notrace_open(struct inode *inode, struct file *file) 2249ftrace_notrace_open(struct inode *inode, struct file *file)
1681{ 2250{
1682 return ftrace_regex_open(inode, file, 0); 2251 return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2252 inode, file);
1683} 2253}
1684 2254
1685static loff_t 2255static loff_t
@@ -1724,86 +2294,99 @@ static int ftrace_match(char *str, char *regex, int len, int type)
1724} 2294}
1725 2295
1726static int 2296static int
1727ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type) 2297enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2298{
2299 struct ftrace_func_entry *entry;
2300 int ret = 0;
2301
2302 entry = ftrace_lookup_ip(hash, rec->ip);
2303 if (not) {
2304 /* Do nothing if it doesn't exist */
2305 if (!entry)
2306 return 0;
2307
2308 free_hash_entry(hash, entry);
2309 } else {
2310 /* Do nothing if it exists */
2311 if (entry)
2312 return 0;
2313
2314 ret = add_hash_entry(hash, rec->ip);
2315 }
2316 return ret;
2317}
2318
2319static int
2320ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2321 char *regex, int len, int type)
1728{ 2322{
1729 char str[KSYM_SYMBOL_LEN]; 2323 char str[KSYM_SYMBOL_LEN];
2324 char *modname;
2325
2326 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2327
2328 if (mod) {
2329 /* module lookup requires matching the module */
2330 if (!modname || strcmp(modname, mod))
2331 return 0;
2332
2333 /* blank search means to match all funcs in the mod */
2334 if (!len)
2335 return 1;
2336 }
1730 2337
1731 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1732 return ftrace_match(str, regex, len, type); 2338 return ftrace_match(str, regex, len, type);
1733} 2339}
1734 2340
1735static int ftrace_match_records(char *buff, int len, int enable) 2341static int
2342match_records(struct ftrace_hash *hash, char *buff,
2343 int len, char *mod, int not)
1736{ 2344{
1737 unsigned int search_len; 2345 unsigned search_len = 0;
1738 struct ftrace_page *pg; 2346 struct ftrace_page *pg;
1739 struct dyn_ftrace *rec; 2347 struct dyn_ftrace *rec;
1740 unsigned long flag; 2348 int type = MATCH_FULL;
1741 char *search; 2349 char *search = buff;
1742 int type;
1743 int not;
1744 int found = 0; 2350 int found = 0;
2351 int ret;
1745 2352
1746 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; 2353 if (len) {
1747 type = filter_parse_regex(buff, len, &search, &not); 2354 type = filter_parse_regex(buff, len, &search, &not);
1748 2355 search_len = strlen(search);
1749 search_len = strlen(search); 2356 }
1750 2357
1751 mutex_lock(&ftrace_lock); 2358 mutex_lock(&ftrace_lock);
1752 do_for_each_ftrace_rec(pg, rec) {
1753 2359
1754 if (rec->flags & FTRACE_FL_FAILED) 2360 if (unlikely(ftrace_disabled))
1755 continue; 2361 goto out_unlock;
1756 2362
1757 if (ftrace_match_record(rec, search, search_len, type)) { 2363 do_for_each_ftrace_rec(pg, rec) {
1758 if (not) 2364
1759 rec->flags &= ~flag; 2365 if (ftrace_match_record(rec, mod, search, search_len, type)) {
1760 else 2366 ret = enter_record(hash, rec, not);
1761 rec->flags |= flag; 2367 if (ret < 0) {
2368 found = ret;
2369 goto out_unlock;
2370 }
1762 found = 1; 2371 found = 1;
1763 } 2372 }
1764 /*
1765 * Only enable filtering if we have a function that
1766 * is filtered on.
1767 */
1768 if (enable && (rec->flags & FTRACE_FL_FILTER))
1769 ftrace_filtered = 1;
1770 } while_for_each_ftrace_rec(); 2373 } while_for_each_ftrace_rec();
2374 out_unlock:
1771 mutex_unlock(&ftrace_lock); 2375 mutex_unlock(&ftrace_lock);
1772 2376
1773 return found; 2377 return found;
1774} 2378}
1775 2379
1776static int 2380static int
1777ftrace_match_module_record(struct dyn_ftrace *rec, char *mod, 2381ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
1778 char *regex, int len, int type)
1779{ 2382{
1780 char str[KSYM_SYMBOL_LEN]; 2383 return match_records(hash, buff, len, NULL, 0);
1781 char *modname;
1782
1783 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1784
1785 if (!modname || strcmp(modname, mod))
1786 return 0;
1787
1788 /* blank search means to match all funcs in the mod */
1789 if (len)
1790 return ftrace_match(str, regex, len, type);
1791 else
1792 return 1;
1793} 2384}
1794 2385
1795static int ftrace_match_module_records(char *buff, char *mod, int enable) 2386static int
2387ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
1796{ 2388{
1797 unsigned search_len = 0;
1798 struct ftrace_page *pg;
1799 struct dyn_ftrace *rec;
1800 int type = MATCH_FULL;
1801 char *search = buff;
1802 unsigned long flag;
1803 int not = 0; 2389 int not = 0;
1804 int found = 0;
1805
1806 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1807 2390
1808 /* blank or '*' mean the same */ 2391 /* blank or '*' mean the same */
1809 if (strcmp(buff, "*") == 0) 2392 if (strcmp(buff, "*") == 0)
@@ -1815,32 +2398,7 @@ static int ftrace_match_module_records(char *buff, char *mod, int enable)
1815 not = 1; 2398 not = 1;
1816 } 2399 }
1817 2400
1818 if (strlen(buff)) { 2401 return match_records(hash, buff, strlen(buff), mod, not);
1819 type = filter_parse_regex(buff, strlen(buff), &search, &not);
1820 search_len = strlen(search);
1821 }
1822
1823 mutex_lock(&ftrace_lock);
1824 do_for_each_ftrace_rec(pg, rec) {
1825
1826 if (rec->flags & FTRACE_FL_FAILED)
1827 continue;
1828
1829 if (ftrace_match_module_record(rec, mod,
1830 search, search_len, type)) {
1831 if (not)
1832 rec->flags &= ~flag;
1833 else
1834 rec->flags |= flag;
1835 found = 1;
1836 }
1837 if (enable && (rec->flags & FTRACE_FL_FILTER))
1838 ftrace_filtered = 1;
1839
1840 } while_for_each_ftrace_rec();
1841 mutex_unlock(&ftrace_lock);
1842
1843 return found;
1844} 2402}
1845 2403
1846/* 2404/*
@@ -1851,7 +2409,10 @@ static int ftrace_match_module_records(char *buff, char *mod, int enable)
1851static int 2409static int
1852ftrace_mod_callback(char *func, char *cmd, char *param, int enable) 2410ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1853{ 2411{
2412 struct ftrace_ops *ops = &global_ops;
2413 struct ftrace_hash *hash;
1854 char *mod; 2414 char *mod;
2415 int ret = -EINVAL;
1855 2416
1856 /* 2417 /*
1857 * cmd == 'mod' because we only registered this func 2418 * cmd == 'mod' because we only registered this func
@@ -1863,15 +2424,24 @@ ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1863 2424
1864 /* we must have a module name */ 2425 /* we must have a module name */
1865 if (!param) 2426 if (!param)
1866 return -EINVAL; 2427 return ret;
1867 2428
1868 mod = strsep(&param, ":"); 2429 mod = strsep(&param, ":");
1869 if (!strlen(mod)) 2430 if (!strlen(mod))
1870 return -EINVAL; 2431 return ret;
1871 2432
1872 if (ftrace_match_module_records(func, mod, enable)) 2433 if (enable)
1873 return 0; 2434 hash = ops->filter_hash;
1874 return -EINVAL; 2435 else
2436 hash = ops->notrace_hash;
2437
2438 ret = ftrace_match_module_records(hash, func, mod);
2439 if (!ret)
2440 ret = -EINVAL;
2441 if (ret < 0)
2442 return ret;
2443
2444 return 0;
1875} 2445}
1876 2446
1877static struct ftrace_func_command ftrace_mod_cmd = { 2447static struct ftrace_func_command ftrace_mod_cmd = {
@@ -1922,6 +2492,7 @@ static int ftrace_probe_registered;
1922 2492
1923static void __enable_ftrace_function_probe(void) 2493static void __enable_ftrace_function_probe(void)
1924{ 2494{
2495 int ret;
1925 int i; 2496 int i;
1926 2497
1927 if (ftrace_probe_registered) 2498 if (ftrace_probe_registered)
@@ -1936,13 +2507,16 @@ static void __enable_ftrace_function_probe(void)
1936 if (i == FTRACE_FUNC_HASHSIZE) 2507 if (i == FTRACE_FUNC_HASHSIZE)
1937 return; 2508 return;
1938 2509
1939 __register_ftrace_function(&trace_probe_ops); 2510 ret = __register_ftrace_function(&trace_probe_ops);
1940 ftrace_startup(0); 2511 if (!ret)
2512 ret = ftrace_startup(&trace_probe_ops, 0);
2513
1941 ftrace_probe_registered = 1; 2514 ftrace_probe_registered = 1;
1942} 2515}
1943 2516
1944static void __disable_ftrace_function_probe(void) 2517static void __disable_ftrace_function_probe(void)
1945{ 2518{
2519 int ret;
1946 int i; 2520 int i;
1947 2521
1948 if (!ftrace_probe_registered) 2522 if (!ftrace_probe_registered)
@@ -1955,8 +2529,10 @@ static void __disable_ftrace_function_probe(void)
1955 } 2529 }
1956 2530
1957 /* no more funcs left */ 2531 /* no more funcs left */
1958 __unregister_ftrace_function(&trace_probe_ops); 2532 ret = __unregister_ftrace_function(&trace_probe_ops);
1959 ftrace_shutdown(0); 2533 if (!ret)
2534 ftrace_shutdown(&trace_probe_ops, 0);
2535
1960 ftrace_probe_registered = 0; 2536 ftrace_probe_registered = 0;
1961} 2537}
1962 2538
@@ -1992,12 +2568,13 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1992 return -EINVAL; 2568 return -EINVAL;
1993 2569
1994 mutex_lock(&ftrace_lock); 2570 mutex_lock(&ftrace_lock);
1995 do_for_each_ftrace_rec(pg, rec) {
1996 2571
1997 if (rec->flags & FTRACE_FL_FAILED) 2572 if (unlikely(ftrace_disabled))
1998 continue; 2573 goto out_unlock;
2574
2575 do_for_each_ftrace_rec(pg, rec) {
1999 2576
2000 if (!ftrace_match_record(rec, search, len, type)) 2577 if (!ftrace_match_record(rec, NULL, search, len, type))
2001 continue; 2578 continue;
2002 2579
2003 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 2580 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
@@ -2158,7 +2735,8 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd)
2158 return ret; 2735 return ret;
2159} 2736}
2160 2737
2161static int ftrace_process_regex(char *buff, int len, int enable) 2738static int ftrace_process_regex(struct ftrace_hash *hash,
2739 char *buff, int len, int enable)
2162{ 2740{
2163 char *func, *command, *next = buff; 2741 char *func, *command, *next = buff;
2164 struct ftrace_func_command *p; 2742 struct ftrace_func_command *p;
@@ -2167,9 +2745,12 @@ static int ftrace_process_regex(char *buff, int len, int enable)
2167 func = strsep(&next, ":"); 2745 func = strsep(&next, ":");
2168 2746
2169 if (!next) { 2747 if (!next) {
2170 if (ftrace_match_records(func, len, enable)) 2748 ret = ftrace_match_records(hash, func, len);
2171 return 0; 2749 if (!ret)
2172 return ret; 2750 ret = -EINVAL;
2751 if (ret < 0)
2752 return ret;
2753 return 0;
2173 } 2754 }
2174 2755
2175 /* command found */ 2756 /* command found */
@@ -2202,6 +2783,10 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
2202 2783
2203 mutex_lock(&ftrace_regex_lock); 2784 mutex_lock(&ftrace_regex_lock);
2204 2785
2786 ret = -ENODEV;
2787 if (unlikely(ftrace_disabled))
2788 goto out_unlock;
2789
2205 if (file->f_mode & FMODE_READ) { 2790 if (file->f_mode & FMODE_READ) {
2206 struct seq_file *m = file->private_data; 2791 struct seq_file *m = file->private_data;
2207 iter = m->private; 2792 iter = m->private;
@@ -2213,7 +2798,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
2213 2798
2214 if (read >= 0 && trace_parser_loaded(parser) && 2799 if (read >= 0 && trace_parser_loaded(parser) &&
2215 !trace_parser_cont(parser)) { 2800 !trace_parser_cont(parser)) {
2216 ret = ftrace_process_regex(parser->buffer, 2801 ret = ftrace_process_regex(iter->hash, parser->buffer,
2217 parser->idx, enable); 2802 parser->idx, enable);
2218 trace_parser_clear(parser); 2803 trace_parser_clear(parser);
2219 if (ret) 2804 if (ret)
@@ -2241,22 +2826,49 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf,
2241 return ftrace_regex_write(file, ubuf, cnt, ppos, 0); 2826 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2242} 2827}
2243 2828
2244static void 2829static int
2245ftrace_set_regex(unsigned char *buf, int len, int reset, int enable) 2830ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
2831 int reset, int enable)
2246{ 2832{
2833 struct ftrace_hash **orig_hash;
2834 struct ftrace_hash *hash;
2835 int ret;
2836
2837 /* All global ops uses the global ops filters */
2838 if (ops->flags & FTRACE_OPS_FL_GLOBAL)
2839 ops = &global_ops;
2840
2247 if (unlikely(ftrace_disabled)) 2841 if (unlikely(ftrace_disabled))
2248 return; 2842 return -ENODEV;
2843
2844 if (enable)
2845 orig_hash = &ops->filter_hash;
2846 else
2847 orig_hash = &ops->notrace_hash;
2848
2849 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2850 if (!hash)
2851 return -ENOMEM;
2249 2852
2250 mutex_lock(&ftrace_regex_lock); 2853 mutex_lock(&ftrace_regex_lock);
2251 if (reset) 2854 if (reset)
2252 ftrace_filter_reset(enable); 2855 ftrace_filter_reset(hash);
2253 if (buf) 2856 if (buf)
2254 ftrace_match_records(buf, len, enable); 2857 ftrace_match_records(hash, buf, len);
2858
2859 mutex_lock(&ftrace_lock);
2860 ret = ftrace_hash_move(orig_hash, hash);
2861 mutex_unlock(&ftrace_lock);
2862
2255 mutex_unlock(&ftrace_regex_lock); 2863 mutex_unlock(&ftrace_regex_lock);
2864
2865 free_ftrace_hash(hash);
2866 return ret;
2256} 2867}
2257 2868
2258/** 2869/**
2259 * ftrace_set_filter - set a function to filter on in ftrace 2870 * ftrace_set_filter - set a function to filter on in ftrace
2871 * @ops - the ops to set the filter with
2260 * @buf - the string that holds the function filter text. 2872 * @buf - the string that holds the function filter text.
2261 * @len - the length of the string. 2873 * @len - the length of the string.
2262 * @reset - non zero to reset all filters before applying this filter. 2874 * @reset - non zero to reset all filters before applying this filter.
@@ -2264,13 +2876,16 @@ ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
2264 * Filters denote which functions should be enabled when tracing is enabled. 2876 * Filters denote which functions should be enabled when tracing is enabled.
2265 * If @buf is NULL and reset is set, all functions will be enabled for tracing. 2877 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2266 */ 2878 */
2267void ftrace_set_filter(unsigned char *buf, int len, int reset) 2879void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
2880 int len, int reset)
2268{ 2881{
2269 ftrace_set_regex(buf, len, reset, 1); 2882 ftrace_set_regex(ops, buf, len, reset, 1);
2270} 2883}
2884EXPORT_SYMBOL_GPL(ftrace_set_filter);
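With filters now per-ops, a caller pairs ftrace_set_filter() with its own ftrace_ops before registering it. A minimal hypothetical sketch (my_ops and my_callback are illustrative names; the two-argument callback signature is the one this file uses):

	static void my_callback(unsigned long ip, unsigned long parent_ip)
	{
		/* runs for every function that passes my_ops's hashes */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_callback,
	};

	static int __init my_tracer_init(void)
	{
		/* reset any old filter, then trace kmalloc only */
		ftrace_set_filter(&my_ops, (unsigned char *)"kmalloc",
				  strlen("kmalloc"), 1);
		return register_ftrace_function(&my_ops);
	}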
2271 2885
2272/** 2886/**
2273 * ftrace_set_notrace - set a function to not trace in ftrace 2887 * ftrace_set_notrace - set a function to not trace in ftrace
2888 * @ops - the ops to set the notrace filter with
2274 * @buf - the string that holds the function notrace text. 2889 * @buf - the string that holds the function notrace text.
2275 * @len - the length of the string. 2890 * @len - the length of the string.
2276 * @reset - non zero to reset all filters before applying this filter. 2891 * @reset - non zero to reset all filters before applying this filter.
@@ -2279,10 +2894,44 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset)
2279 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 2894 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2280 * for tracing. 2895 * for tracing.
2281 */ 2896 */
2282void ftrace_set_notrace(unsigned char *buf, int len, int reset) 2897void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
2898 int len, int reset)
2283{ 2899{
2284 ftrace_set_regex(buf, len, reset, 0); 2900 ftrace_set_regex(ops, buf, len, reset, 0);
2285} 2901}
2902EXPORT_SYMBOL_GPL(ftrace_set_notrace);
2903/**
2904 * ftrace_set_global_filter - set a function to filter on
2905 *                            with the global ops
2906 * @buf - the string that holds the function filter text.
2907 * @len - the length of the string.
2908 * @reset - non zero to reset all filters before applying this filter.
2909 *
2910 * Filters denote which functions should be enabled when tracing is enabled.
2911 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2912 */
2913void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
2914{
2915 ftrace_set_regex(&global_ops, buf, len, reset, 1);
2916}
2917EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
2918
2919/**
2920 * ftrace_set_global_notrace - set a function to not trace
2921 *                             with the global ops
2922 * @buf - the string that holds the function notrace text.
2923 * @len - the length of the string.
2924 * @reset - non zero to reset all filters before applying this filter.
2925 *
2926 * Notrace Filters denote which functions should not be enabled when tracing
2927 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2928 * for tracing.
2929 */
2930void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
2931{
2932 ftrace_set_regex(&global_ops, buf, len, reset, 0);
2933}
2934EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
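For callers of the old single-filter interface, the conversion is mechanical: keep the old arguments and switch to the global wrapper, e.g.

	/* before this patch */
	ftrace_set_filter(buf, len, 1);
	/* after, preserving the old global behaviour */
	ftrace_set_global_filter(buf, len, 1);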
2286 2935
2287/* 2936/*
2288 * command line interface to allow users to set filters on boot up. 2937 * command line interface to allow users to set filters on boot up.
@@ -2333,22 +2982,23 @@ static void __init set_ftrace_early_graph(char *buf)
2333} 2982}
2334#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 2983#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2335 2984
2336static void __init set_ftrace_early_filter(char *buf, int enable) 2985static void __init
2986set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
2337{ 2987{
2338 char *func; 2988 char *func;
2339 2989
2340 while (buf) { 2990 while (buf) {
2341 func = strsep(&buf, ","); 2991 func = strsep(&buf, ",");
2342 ftrace_set_regex(func, strlen(func), 0, enable); 2992 ftrace_set_regex(ops, func, strlen(func), 0, enable);
2343 } 2993 }
2344} 2994}
2345 2995
2346static void __init set_ftrace_early_filters(void) 2996static void __init set_ftrace_early_filters(void)
2347{ 2997{
2348 if (ftrace_filter_buf[0]) 2998 if (ftrace_filter_buf[0])
2349 set_ftrace_early_filter(ftrace_filter_buf, 1); 2999 set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
2350 if (ftrace_notrace_buf[0]) 3000 if (ftrace_notrace_buf[0])
2351 set_ftrace_early_filter(ftrace_notrace_buf, 0); 3001 set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
2352#ifdef CONFIG_FUNCTION_GRAPH_TRACER 3002#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2353 if (ftrace_graph_buf[0]) 3003 if (ftrace_graph_buf[0])
2354 set_ftrace_early_graph(ftrace_graph_buf); 3004 set_ftrace_early_graph(ftrace_graph_buf);
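ftrace_filter_buf and ftrace_notrace_buf are filled from the ftrace_filter= and ftrace_notrace= boot parameters, so the comma splitting above serves command lines such as:

	ftrace_filter=kmalloc,kfree ftrace_notrace=rcu_read_lock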
@@ -2356,11 +3006,14 @@ static void __init set_ftrace_early_filters(void)
2356} 3006}
2357 3007
2358static int 3008static int
2359ftrace_regex_release(struct inode *inode, struct file *file, int enable) 3009ftrace_regex_release(struct inode *inode, struct file *file)
2360{ 3010{
2361 struct seq_file *m = (struct seq_file *)file->private_data; 3011 struct seq_file *m = (struct seq_file *)file->private_data;
2362 struct ftrace_iterator *iter; 3012 struct ftrace_iterator *iter;
3013 struct ftrace_hash **orig_hash;
2363 struct trace_parser *parser; 3014 struct trace_parser *parser;
3015 int filter_hash;
3016 int ret;
2364 3017
2365 mutex_lock(&ftrace_regex_lock); 3018 mutex_lock(&ftrace_regex_lock);
2366 if (file->f_mode & FMODE_READ) { 3019 if (file->f_mode & FMODE_READ) {
@@ -2373,33 +3026,41 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
2373 parser = &iter->parser; 3026 parser = &iter->parser;
2374 if (trace_parser_loaded(parser)) { 3027 if (trace_parser_loaded(parser)) {
2375 parser->buffer[parser->idx] = 0; 3028 parser->buffer[parser->idx] = 0;
2376 ftrace_match_records(parser->buffer, parser->idx, enable); 3029 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
2377 } 3030 }
2378 3031
2379 mutex_lock(&ftrace_lock);
2380 if (ftrace_start_up && ftrace_enabled)
2381 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
2382 mutex_unlock(&ftrace_lock);
2383
2384 trace_parser_put(parser); 3032 trace_parser_put(parser);
3033
3034 if (file->f_mode & FMODE_WRITE) {
3035 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3036
3037 if (filter_hash)
3038 orig_hash = &iter->ops->filter_hash;
3039 else
3040 orig_hash = &iter->ops->notrace_hash;
3041
3042 mutex_lock(&ftrace_lock);
3043 /*
3044 * Remove the current set, update the hash and add
3045 * them back.
3046 */
3047 ftrace_hash_rec_disable(iter->ops, filter_hash);
3048 ret = ftrace_hash_move(orig_hash, iter->hash);
3049 if (!ret) {
3050 ftrace_hash_rec_enable(iter->ops, filter_hash);
3051 if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
3052 && ftrace_enabled)
3053 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
3054 }
3055 mutex_unlock(&ftrace_lock);
3056 }
3057 free_ftrace_hash(iter->hash);
2385 kfree(iter); 3058 kfree(iter);
2386 3059
2387 mutex_unlock(&ftrace_regex_lock); 3060 mutex_unlock(&ftrace_regex_lock);
2388 return 0; 3061 return 0;
2389} 3062}
2390 3063
2391static int
2392ftrace_filter_release(struct inode *inode, struct file *file)
2393{
2394 return ftrace_regex_release(inode, file, 1);
2395}
2396
2397static int
2398ftrace_notrace_release(struct inode *inode, struct file *file)
2399{
2400 return ftrace_regex_release(inode, file, 0);
2401}
2402
2403static const struct file_operations ftrace_avail_fops = { 3064static const struct file_operations ftrace_avail_fops = {
2404 .open = ftrace_avail_open, 3065 .open = ftrace_avail_open,
2405 .read = seq_read, 3066 .read = seq_read,
@@ -2407,8 +3068,8 @@ static const struct file_operations ftrace_avail_fops = {
2407 .release = seq_release_private, 3068 .release = seq_release_private,
2408}; 3069};
2409 3070
2410static const struct file_operations ftrace_failures_fops = { 3071static const struct file_operations ftrace_enabled_fops = {
2411 .open = ftrace_failures_open, 3072 .open = ftrace_enabled_open,
2412 .read = seq_read, 3073 .read = seq_read,
2413 .llseek = seq_lseek, 3074 .llseek = seq_lseek,
2414 .release = seq_release_private, 3075 .release = seq_release_private,
@@ -2418,8 +3079,8 @@ static const struct file_operations ftrace_filter_fops = {
2418 .open = ftrace_filter_open, 3079 .open = ftrace_filter_open,
2419 .read = seq_read, 3080 .read = seq_read,
2420 .write = ftrace_filter_write, 3081 .write = ftrace_filter_write,
2421 .llseek = no_llseek, 3082 .llseek = ftrace_regex_lseek,
2422 .release = ftrace_filter_release, 3083 .release = ftrace_regex_release,
2423}; 3084};
2424 3085
2425static const struct file_operations ftrace_notrace_fops = { 3086static const struct file_operations ftrace_notrace_fops = {
@@ -2427,7 +3088,7 @@ static const struct file_operations ftrace_notrace_fops = {
2427 .read = seq_read, 3088 .read = seq_read,
2428 .write = ftrace_notrace_write, 3089 .write = ftrace_notrace_write,
2429 .llseek = ftrace_regex_lseek, 3090 .llseek = ftrace_regex_lseek,
2430 .release = ftrace_notrace_release, 3091 .release = ftrace_regex_release,
2431}; 3092};
2432 3093
2433#ifdef CONFIG_FUNCTION_GRAPH_TRACER 3094#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -2536,9 +3197,6 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2536 bool exists; 3197 bool exists;
2537 int i; 3198 int i;
2538 3199
2539 if (ftrace_disabled)
2540 return -ENODEV;
2541
2542 /* decode regex */ 3200 /* decode regex */
2543 type = filter_parse_regex(buffer, strlen(buffer), &search, &not); 3201 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
2544 if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS) 3202 if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
@@ -2547,12 +3205,18 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2547 search_len = strlen(search); 3205 search_len = strlen(search);
2548 3206
2549 mutex_lock(&ftrace_lock); 3207 mutex_lock(&ftrace_lock);
3208
3209 if (unlikely(ftrace_disabled)) {
3210 mutex_unlock(&ftrace_lock);
3211 return -ENODEV;
3212 }
3213
2550 do_for_each_ftrace_rec(pg, rec) { 3214 do_for_each_ftrace_rec(pg, rec) {
2551 3215
2552 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) 3216 if (rec->flags & FTRACE_FL_FREE)
2553 continue; 3217 continue;
2554 3218
2555 if (ftrace_match_record(rec, search, search_len, type)) { 3219 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
2556 /* if it is in the array */ 3220 /* if it is in the array */
2557 exists = false; 3221 exists = false;
2558 for (i = 0; i < *idx; i++) { 3222 for (i = 0; i < *idx; i++) {
@@ -2632,6 +3296,7 @@ static const struct file_operations ftrace_graph_fops = {
2632 .read = seq_read, 3296 .read = seq_read,
2633 .write = ftrace_graph_write, 3297 .write = ftrace_graph_write,
2634 .release = ftrace_graph_release, 3298 .release = ftrace_graph_release,
3299 .llseek = seq_lseek,
2635}; 3300};
2636#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 3301#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2637 3302
@@ -2641,8 +3306,8 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2641 trace_create_file("available_filter_functions", 0444, 3306 trace_create_file("available_filter_functions", 0444,
2642 d_tracer, NULL, &ftrace_avail_fops); 3307 d_tracer, NULL, &ftrace_avail_fops);
2643 3308
2644 trace_create_file("failures", 0444, 3309 trace_create_file("enabled_functions", 0444,
2645 d_tracer, NULL, &ftrace_failures_fops); 3310 d_tracer, NULL, &ftrace_enabled_fops);
2646 3311
2647 trace_create_file("set_ftrace_filter", 0644, d_tracer, 3312 trace_create_file("set_ftrace_filter", 0644, d_tracer,
2648 NULL, &ftrace_filter_fops); 3313 NULL, &ftrace_filter_fops);
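These files land in the tracing debugfs directory; with debugfs mounted at /sys/kernel/debug, typical use of the two touched here looks like:

	echo kmalloc > /sys/kernel/debug/tracing/set_ftrace_filter
	cat /sys/kernel/debug/tracing/enabled_functions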
@@ -2682,7 +3347,10 @@ static int ftrace_process_locs(struct module *mod,
2682 ftrace_record_ip(addr); 3347 ftrace_record_ip(addr);
2683 } 3348 }
2684 3349
2685 /* disable interrupts to prevent kstop machine */ 3350 /*
3351 * Disable interrupts to prevent interrupts from executing
3352 * code that is being modified.
3353 */
2686 local_irq_save(flags); 3354 local_irq_save(flags);
2687 ftrace_update_code(mod); 3355 ftrace_update_code(mod);
2688 local_irq_restore(flags); 3356 local_irq_restore(flags);
@@ -2697,10 +3365,11 @@ void ftrace_release_mod(struct module *mod)
2697 struct dyn_ftrace *rec; 3365 struct dyn_ftrace *rec;
2698 struct ftrace_page *pg; 3366 struct ftrace_page *pg;
2699 3367
3368 mutex_lock(&ftrace_lock);
3369
2700 if (ftrace_disabled) 3370 if (ftrace_disabled)
2701 return; 3371 goto out_unlock;
2702 3372
2703 mutex_lock(&ftrace_lock);
2704 do_for_each_ftrace_rec(pg, rec) { 3373 do_for_each_ftrace_rec(pg, rec) {
2705 if (within_module_core(rec->ip, mod)) { 3374 if (within_module_core(rec->ip, mod)) {
2706 /* 3375 /*
@@ -2711,6 +3380,7 @@ void ftrace_release_mod(struct module *mod)
2711 ftrace_free_rec(rec); 3380 ftrace_free_rec(rec);
2712 } 3381 }
2713 } while_for_each_ftrace_rec(); 3382 } while_for_each_ftrace_rec();
3383 out_unlock:
2714 mutex_unlock(&ftrace_lock); 3384 mutex_unlock(&ftrace_lock);
2715} 3385}
2716 3386
@@ -2797,6 +3467,10 @@ void __init ftrace_init(void)
2797 3467
2798#else 3468#else
2799 3469
3470static struct ftrace_ops global_ops = {
3471 .func = ftrace_stub,
3472};
3473
2800static int __init ftrace_nodyn_init(void) 3474static int __init ftrace_nodyn_init(void)
2801{ 3475{
2802 ftrace_enabled = 1; 3476 ftrace_enabled = 1;
@@ -2807,12 +3481,47 @@ device_initcall(ftrace_nodyn_init);
2807static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } 3481static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
2808static inline void ftrace_startup_enable(int command) { } 3482static inline void ftrace_startup_enable(int command) { }
2809/* Keep as macros so we do not need to define the commands */ 3483/* Keep as macros so we do not need to define the commands */
2810# define ftrace_startup(command) do { } while (0) 3484# define ftrace_startup(ops, command) \
2811# define ftrace_shutdown(command) do { } while (0) 3485 ({ \
3486 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
3487 0; \
3488 })
3489# define ftrace_shutdown(ops, command) do { } while (0)
2812# define ftrace_startup_sysctl() do { } while (0) 3490# define ftrace_startup_sysctl() do { } while (0)
2813# define ftrace_shutdown_sysctl() do { } while (0) 3491# define ftrace_shutdown_sysctl() do { } while (0)
3492
3493static inline int
3494ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3495{
3496 return 1;
3497}
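For contrast with the stub above, the dynamic-ftrace ftrace_ops_test() defined earlier in this series boils down to a predicate along these lines (a simplified sketch, RCU dereferences omitted; ftrace_lookup_ip and the hash fields are names this patch introduces): an empty filter hash means trace everything, and a notrace hit always wins:

	static int ftrace_ops_test_sketch(struct ftrace_ops *ops, unsigned long ip)
	{
		return (!ops->filter_hash->count ||
			ftrace_lookup_ip(ops->filter_hash, ip)) &&
		       (!ops->notrace_hash->count ||
			!ftrace_lookup_ip(ops->notrace_hash, ip));
	}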
3498
2814#endif /* CONFIG_DYNAMIC_FTRACE */ 3499#endif /* CONFIG_DYNAMIC_FTRACE */
2815 3500
3501static void
3502ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3503{
3504 struct ftrace_ops *op;
3505
3506 if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
3507 return;
3508
3509 trace_recursion_set(TRACE_INTERNAL_BIT);
3510 /*
3511 * Some of the ops may be dynamically allocated,
3512 * they must be freed after a synchronize_sched().
3513 */
3514 preempt_disable_notrace();
3515 op = rcu_dereference_raw(ftrace_ops_list);
3516 while (op != &ftrace_list_end) {
3517 if (ftrace_ops_test(op, ip))
3518 op->func(ip, parent_ip);
3519 op = rcu_dereference_raw(op->next);
3520 }
3521 preempt_enable_notrace();
3522 trace_recursion_clear(TRACE_INTERNAL_BIT);
3523}
3524
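The synchronize_sched() note above is the contract for dynamically allocated ops: the list is walked with only preemption disabled, so a full grace period must pass between unregistering and freeing. A hypothetical teardown sketch (dyn_ops is an illustrative kmalloc()ed ftrace_ops):

	unregister_ftrace_function(dyn_ops);
	synchronize_sched();	/* wait out in-flight list walkers */
	kfree(dyn_ops);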
2816static void clear_ftrace_swapper(void) 3525static void clear_ftrace_swapper(void)
2817{ 3526{
2818 struct task_struct *p; 3527 struct task_struct *p;
@@ -3105,19 +3814,23 @@ void ftrace_kill(void)
3105 */ 3814 */
3106int register_ftrace_function(struct ftrace_ops *ops) 3815int register_ftrace_function(struct ftrace_ops *ops)
3107{ 3816{
3108 int ret; 3817 int ret = -1;
3109
3110 if (unlikely(ftrace_disabled))
3111 return -1;
3112 3818
3113 mutex_lock(&ftrace_lock); 3819 mutex_lock(&ftrace_lock);
3114 3820
3821 if (unlikely(ftrace_disabled))
3822 goto out_unlock;
3823
3115 ret = __register_ftrace_function(ops); 3824 ret = __register_ftrace_function(ops);
3116 ftrace_startup(0); 3825 if (!ret)
3826 ret = ftrace_startup(ops, 0);
3117 3827
3828
3829 out_unlock:
3118 mutex_unlock(&ftrace_lock); 3830 mutex_unlock(&ftrace_lock);
3119 return ret; 3831 return ret;
3120} 3832}
3833EXPORT_SYMBOL_GPL(register_ftrace_function);
3121 3834
3122/** 3835/**
3123 * unregister_ftrace_function - unregister a function for profiling. 3836 * unregister_ftrace_function - unregister a function for profiling.
@@ -3131,25 +3844,27 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
3131 3844
3132 mutex_lock(&ftrace_lock); 3845 mutex_lock(&ftrace_lock);
3133 ret = __unregister_ftrace_function(ops); 3846 ret = __unregister_ftrace_function(ops);
3134 ftrace_shutdown(0); 3847 if (!ret)
3848 ftrace_shutdown(ops, 0);
3135 mutex_unlock(&ftrace_lock); 3849 mutex_unlock(&ftrace_lock);
3136 3850
3137 return ret; 3851 return ret;
3138} 3852}
3853EXPORT_SYMBOL_GPL(unregister_ftrace_function);
3139 3854
3140int 3855int
3141ftrace_enable_sysctl(struct ctl_table *table, int write, 3856ftrace_enable_sysctl(struct ctl_table *table, int write,
3142 void __user *buffer, size_t *lenp, 3857 void __user *buffer, size_t *lenp,
3143 loff_t *ppos) 3858 loff_t *ppos)
3144{ 3859{
3145 int ret; 3860 int ret = -ENODEV;
3146
3147 if (unlikely(ftrace_disabled))
3148 return -ENODEV;
3149 3861
3150 mutex_lock(&ftrace_lock); 3862 mutex_lock(&ftrace_lock);
3151 3863
3152 ret = proc_dointvec(table, write, buffer, lenp, ppos); 3864 if (unlikely(ftrace_disabled))
3865 goto out;
3866
3867 ret = proc_dointvec(table, write, buffer, lenp, ppos);
3153 3868
3154 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) 3869 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
3155 goto out; 3870 goto out;
@@ -3161,11 +3876,11 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
3161 ftrace_startup_sysctl(); 3876 ftrace_startup_sysctl();
3162 3877
3163 /* we are starting ftrace again */ 3878 /* we are starting ftrace again */
3164 if (ftrace_list != &ftrace_list_end) { 3879 if (ftrace_ops_list != &ftrace_list_end) {
3165 if (ftrace_list->next == &ftrace_list_end) 3880 if (ftrace_ops_list->next == &ftrace_list_end)
3166 ftrace_trace_function = ftrace_list->func; 3881 ftrace_trace_function = ftrace_ops_list->func;
3167 else 3882 else
3168 ftrace_trace_function = ftrace_list_func; 3883 ftrace_trace_function = ftrace_ops_list_func;
3169 } 3884 }
3170 3885
3171 } else { 3886 } else {
@@ -3289,7 +4004,7 @@ static int start_graph_tracing(void)
3289 /* The cpu_boot init_task->ret_stack will never be freed */ 4004 /* The cpu_boot init_task->ret_stack will never be freed */
3290 for_each_online_cpu(cpu) { 4005 for_each_online_cpu(cpu) {
3291 if (!idle_task(cpu)->ret_stack) 4006 if (!idle_task(cpu)->ret_stack)
3292 ftrace_graph_init_task(idle_task(cpu)); 4007 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
3293 } 4008 }
3294 4009
3295 do { 4010 do {
@@ -3354,7 +4069,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3354 ftrace_graph_return = retfunc; 4069 ftrace_graph_return = retfunc;
3355 ftrace_graph_entry = entryfunc; 4070 ftrace_graph_entry = entryfunc;
3356 4071
3357 ftrace_startup(FTRACE_START_FUNC_RET); 4072 ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
3358 4073
3359out: 4074out:
3360 mutex_unlock(&ftrace_lock); 4075 mutex_unlock(&ftrace_lock);
@@ -3371,7 +4086,7 @@ void unregister_ftrace_graph(void)
3371 ftrace_graph_active--; 4086 ftrace_graph_active--;
3372 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; 4087 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
3373 ftrace_graph_entry = ftrace_graph_entry_stub; 4088 ftrace_graph_entry = ftrace_graph_entry_stub;
3374 ftrace_shutdown(FTRACE_STOP_FUNC_RET); 4089 ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
3375 unregister_pm_notifier(&ftrace_suspend_notifier); 4090 unregister_pm_notifier(&ftrace_suspend_notifier);
3376 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); 4091 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
3377 4092
@@ -3379,6 +4094,49 @@ void unregister_ftrace_graph(void)
3379 mutex_unlock(&ftrace_lock); 4094 mutex_unlock(&ftrace_lock);
3380} 4095}
3381 4096
4097static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4098
4099static void
4100graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4101{
4102 atomic_set(&t->tracing_graph_pause, 0);
4103 atomic_set(&t->trace_overrun, 0);
4104 t->ftrace_timestamp = 0;
4105 /* make curr_ret_stack visible before we add the ret_stack */
4106 smp_wmb();
4107 t->ret_stack = ret_stack;
4108}
4109
4110/*
4111 * Allocate a return stack for the idle task. May be the first
4112 * time through, or it may be done by CPU hotplug online.
4113 */
4114void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4115{
4116 t->curr_ret_stack = -1;
4117 /*
4118 * The idle task has no parent, it either has its own
4119 * stack or no stack at all.
4120 */
4121 if (t->ret_stack)
4122 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4123
4124 if (ftrace_graph_active) {
4125 struct ftrace_ret_stack *ret_stack;
4126
4127 ret_stack = per_cpu(idle_ret_stack, cpu);
4128 if (!ret_stack) {
4129 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4130 * sizeof(struct ftrace_ret_stack),
4131 GFP_KERNEL);
4132 if (!ret_stack)
4133 return;
4134 per_cpu(idle_ret_stack, cpu) = ret_stack;
4135 }
4136 graph_init_task(t, ret_stack);
4137 }
4138}
4139
3382/* Allocate a return stack for newly created task */ 4140/* Allocate a return stack for newly created task */
3383void ftrace_graph_init_task(struct task_struct *t) 4141void ftrace_graph_init_task(struct task_struct *t)
3384{ 4142{
@@ -3394,12 +4152,7 @@ void ftrace_graph_init_task(struct task_struct *t)
3394 GFP_KERNEL); 4152 GFP_KERNEL);
3395 if (!ret_stack) 4153 if (!ret_stack)
3396 return; 4154 return;
3397 atomic_set(&t->tracing_graph_pause, 0); 4155 graph_init_task(t, ret_stack);
3398 atomic_set(&t->trace_overrun, 0);
3399 t->ftrace_timestamp = 0;
3400 /* make curr_ret_stack visable before we add the ret_stack */
3401 smp_wmb();
3402 t->ret_stack = ret_stack;
3403 } 4156 }
3404} 4157}
3405 4158