author		Steven Rostedt <srostedt@redhat.com>	2009-02-14 15:29:06 -0500
committer	Steven Rostedt <srostedt@redhat.com>	2009-02-16 22:44:09 -0500
commit		59df055f1991c9fc0c71a9230663c39188f6972f (patch)
tree		e1799897a8c8da924a3e933f539e8869e8725cb6 /kernel/trace/ftrace.c
parent		e6ea44e9b4c12325337cd1c06103cd515a1c02b2 (diff)
ftrace: trace different functions with a different tracer
Impact: new feature

Currently, the function tracer only gives you the ability to hook a tracer to all functions being traced. The dynamic function tracer allows you to pick and choose which of those functions will be traced, but all functions being traced will call all tracers that registered with the function tracer.

This patch adds a new feature that allows a tracer to hook to specific functions, even when all functions are being traced. It allows different functions to call different tracer hooks.

The way this is accomplished is by a special function that hooks into the function tracer and sets up a hash table knowing which tracer hook to call for which function. This is the most general and easiest method to accomplish this. Later, an arch may choose to supply its own method of changing the mcount call of a function to call a different tracer. But that will be an exercise for the future.

To register a function:

  struct ftrace_hook_ops {
	void	(*func)(unsigned long ip,
			unsigned long parent_ip,
			void **data);
	int	(*callback)(unsigned long ip, void **data);
	void	(*free)(void **data);
  };

  int register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
				    void *data);

  glob is a simple glob to search for the functions to hook.
  ops is a pointer to the operations (listed below).
  data is the default data to be passed to the hook functions when traced.

  ops:
   func is the hook function to call when the functions are traced.
   callback is called when setting up the hash, in case the tracer needs
      to do something special for each function being traced and wants to
      give each function its own data. The address of the entry's data is
      passed to this callback, so the callback may update the entry to
      whatever it would like.
   free is a callback for when the entry is freed. In case the tracer
      allocated any data, it is given the chance to free it.

To unregister we have three functions:

  void
  unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
				  void *data)

This will unregister all hooks that match glob, point to ops, and have their data matching data. (Note: if glob is NULL, blank or '*', all functions will be tested.)

  void
  unregister_ftrace_function_hook_func(char *glob,
				       struct ftrace_hook_ops *ops)

This will unregister all functions matching glob that have an entry pointing to ops.

  void unregister_ftrace_function_hook_all(char *glob)

This simply unregisters all funcs.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
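For illustration, a minimal sketch of a hypothetical user of this interface; the my_hook_* and my_tracer_* names and the "sched_*" glob are invented for the example and are not part of this patch:

  /* hypothetical example, not part of this patch */
  static void my_hook_func(unsigned long ip, unsigned long parent_ip,
			   void **data)
  {
	/* called from the function tracer for every matched function */
  }

  static int my_hook_callback(unsigned long ip, void **data)
  {
	/*
	 * Optionally give this particular function its own data;
	 * a negative return value skips hooking this function.
	 */
	return 0;
  }

  static void my_hook_free(void **data)
  {
	/* free anything allocated in the callback */
  }

  static struct ftrace_hook_ops my_hook_ops = {
	.func		= my_hook_func,
	.callback	= my_hook_callback,
	.free		= my_hook_free,
  };

  static int my_tracer_init(void)
  {
	/* hook every function whose name starts with "sched_" */
	return register_ftrace_function_hook("sched_*", &my_hook_ops, NULL);
  }

  static void my_tracer_exit(void)
  {
	/* remove only the hooks that point to my_hook_ops */
	unregister_ftrace_function_hook_func("sched_*", &my_hook_ops);
  }

register_ftrace_function_hook() returns the number of functions hooked, or a negative error, so a caller would typically treat a return value of 0 or less as failure.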
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--	kernel/trace/ftrace.c	| 247
1 file changed, 247 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 157d4f68b0e0..0b80e325f296 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -27,6 +27,7 @@
 #include <linux/sysctl.h>
 #include <linux/ctype.h>
 #include <linux/list.h>
+#include <linux/hash.h>
 
 #include <asm/ftrace.h>
 
@@ -1245,6 +1246,252 @@ static int __init ftrace_mod_cmd_init(void)
 }
 device_initcall(ftrace_mod_cmd_init);
 
+#define FTRACE_HASH_BITS 7
+#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
+static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
+
+struct ftrace_func_hook {
+	struct hlist_node node;
+	struct ftrace_hook_ops *ops;
+	unsigned long flags;
+	unsigned long ip;
+	void *data;
+	struct rcu_head rcu;
+};
+
+static void
+function_trace_hook_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct ftrace_func_hook *entry;
+	struct hlist_head *hhd;
+	struct hlist_node *n;
+	unsigned long key;
+	int resched;
+
+	key = hash_long(ip, FTRACE_HASH_BITS);
+
+	hhd = &ftrace_func_hash[key];
+
+	if (hlist_empty(hhd))
+		return;
+
+	/*
+	 * Disable preemption for these calls to prevent a RCU grace
+	 * period. This syncs the hash iteration and freeing of items
+	 * on the hash. rcu_read_lock is too dangerous here.
+	 */
+	resched = ftrace_preempt_disable();
+	hlist_for_each_entry_rcu(entry, n, hhd, node) {
+		if (entry->ip == ip)
+			entry->ops->func(ip, parent_ip, &entry->data);
+	}
+	ftrace_preempt_enable(resched);
+}
+
+static struct ftrace_ops trace_hook_ops __read_mostly =
+{
+	.func = function_trace_hook_call,
+};
+
+static int ftrace_hook_registered;
+
+static void __enable_ftrace_function_hook(void)
+{
+	int i;
+
+	if (ftrace_hook_registered)
+		return;
+
+	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+		struct hlist_head *hhd = &ftrace_func_hash[i];
+		if (hhd->first)
+			break;
+	}
+	/* Nothing registered? */
+	if (i == FTRACE_FUNC_HASHSIZE)
+		return;
+
+	__register_ftrace_function(&trace_hook_ops);
+	ftrace_startup(0);
+	ftrace_hook_registered = 1;
+}
+
+static void __disable_ftrace_function_hook(void)
+{
+	int i;
+
+	if (!ftrace_hook_registered)
+		return;
+
+	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+		struct hlist_head *hhd = &ftrace_func_hash[i];
+		if (hhd->first)
+			return;
+	}
+
+	/* no more funcs left */
+	__unregister_ftrace_function(&trace_hook_ops);
+	ftrace_shutdown(0);
+	ftrace_hook_registered = 0;
+}
+
+
+static void ftrace_free_entry_rcu(struct rcu_head *rhp)
+{
+	struct ftrace_func_hook *entry =
+		container_of(rhp, struct ftrace_func_hook, rcu);
+
+	if (entry->ops->free)
+		entry->ops->free(&entry->data);
+	kfree(entry);
+}
+
+
+int
+register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
+			      void *data)
+{
+	struct ftrace_func_hook *entry;
+	struct ftrace_page *pg;
+	struct dyn_ftrace *rec;
+	unsigned long key;
+	int type, len, not;
+	int count = 0;
+	char *search;
+
+	type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
+	len = strlen(search);
+
+	/* we do not support '!' for function hooks */
+	if (WARN_ON(not))
+		return -EINVAL;
+
+	mutex_lock(&ftrace_lock);
+	do_for_each_ftrace_rec(pg, rec) {
+
+		if (rec->flags & FTRACE_FL_FAILED)
+			continue;
+
+		if (!ftrace_match_record(rec, search, len, type))
+			continue;
+
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			/* If we did not hook to any, then return error */
+			if (!count)
+				count = -ENOMEM;
+			goto out_unlock;
+		}
+
+		count++;
+
+		entry->data = data;
+
+		/*
+		 * The caller might want to do something special
+		 * for each function we find. We call the callback
+		 * to give the caller an opportunity to do so.
+		 */
+		if (ops->callback) {
+			if (ops->callback(rec->ip, &entry->data) < 0) {
+				/* caller does not like this func */
+				kfree(entry);
+				continue;
+			}
+		}
+
+		entry->ops = ops;
+		entry->ip = rec->ip;
+
+		key = hash_long(entry->ip, FTRACE_HASH_BITS);
+		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
+
+	} while_for_each_ftrace_rec();
+	__enable_ftrace_function_hook();
+
+ out_unlock:
+	mutex_unlock(&ftrace_lock);
+
+	return count;
+}
+
+enum {
+	HOOK_TEST_FUNC = 1,
+	HOOK_TEST_DATA = 2
+};
+
+static void
+__unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
+				  void *data, int flags)
+{
+	struct ftrace_func_hook *entry;
+	struct hlist_node *n, *tmp;
+	char str[KSYM_SYMBOL_LEN];
+	int type = MATCH_FULL;
+	int i, len = 0;
+	char *search;
+
+	if (glob && (strcmp(glob, "*") || !strlen(glob)))
+		glob = NULL;
+	else {
+		int not;
+
+		type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
+		len = strlen(search);
+
+		/* we do not support '!' for function hooks */
+		if (WARN_ON(not))
+			return;
+	}
+
+	mutex_lock(&ftrace_lock);
+	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+		struct hlist_head *hhd = &ftrace_func_hash[i];
+
+		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
+
+			/* break up if statements for readability */
+			if ((flags & HOOK_TEST_FUNC) && entry->ops != ops)
+				continue;
+
+			if ((flags & HOOK_TEST_DATA) && entry->data != data)
+				continue;
+
+			/* do this last, since it is the most expensive */
+			if (glob) {
+				kallsyms_lookup(entry->ip, NULL, NULL,
+						NULL, str);
+				if (!ftrace_match(str, glob, len, type))
+					continue;
+			}
+
+			hlist_del(&entry->node);
+			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
+		}
+	}
+	__disable_ftrace_function_hook();
+	mutex_unlock(&ftrace_lock);
+}
+
+void
+unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
+				void *data)
+{
+	__unregister_ftrace_function_hook(glob, ops, data,
+					  HOOK_TEST_FUNC | HOOK_TEST_DATA);
+}
+
+void
+unregister_ftrace_function_hook_func(char *glob, struct ftrace_hook_ops *ops)
+{
+	__unregister_ftrace_function_hook(glob, ops, NULL, HOOK_TEST_FUNC);
+}
+
+void unregister_ftrace_function_hook_all(char *glob)
+{
+	__unregister_ftrace_function_hook(glob, NULL, NULL, 0);
+}
+
 static LIST_HEAD(ftrace_commands);
 static DEFINE_MUTEX(ftrace_cmd_mutex);
 