diff options
author | Steven Rostedt <srostedt@redhat.com> | 2011-05-02 17:34:47 -0400 |
---|---|---|
committer | Steven Rostedt <rostedt@goodmis.org> | 2011-05-18 15:29:46 -0400 |
commit | 33dc9b1267d59cef46ff0bd6bc043190845dc919 (patch) | |
tree | d5a3f78a6aabcd33b9848d3bf86b9b53ff6ea2e0 | |
parent | f45948e898e7bc76a73a468796d2ce80dd040058 (diff) |
ftrace: Separate hash allocation and assignment
When filtering, allocate a hash to insert the function records.
After the filtering is complete, assign it to the ftrace_ops structure.
This allows the ftrace_ops structure to have a much smaller array of
hash buckets instead of wasting a lot of memory.
A read-only empty_hash is created to be the minimum size that any ftrace_ops
can point to.
When a new hash is created, it has the following steps:
o Allocate a default hash.
o Walk the function records, assigning the filtered records to the hash.
o Allocate a new hash with the appropriate number of buckets.
o Move the entries from the default hash to the new hash.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r-- | kernel/trace/ftrace.c | 275 |
1 files changed, 233 insertions, 42 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index a517a6c40645..46f08264980b 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -57,7 +57,8 @@ | |||
57 | /* hash bits for specific function selection */ | 57 | /* hash bits for specific function selection */ |
58 | #define FTRACE_HASH_BITS 7 | 58 | #define FTRACE_HASH_BITS 7 |
59 | #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) | 59 | #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) |
60 | #define FTRACE_HASH_MAX_BITS 10 | 60 | #define FTRACE_HASH_DEFAULT_BITS 10 |
61 | #define FTRACE_HASH_MAX_BITS 12 | ||
61 | 62 | ||
62 | /* ftrace_enabled is a method to turn ftrace on or off */ | 63 | /* ftrace_enabled is a method to turn ftrace on or off */ |
63 | int ftrace_enabled __read_mostly; | 64 | int ftrace_enabled __read_mostly; |
@@ -877,22 +878,22 @@ struct ftrace_hash { | |||
877 | unsigned long count; | 878 | unsigned long count; |
878 | }; | 879 | }; |
879 | 880 | ||
880 | static struct hlist_head notrace_buckets[1 << FTRACE_HASH_MAX_BITS]; | 881 | /* |
881 | static struct ftrace_hash notrace_hash = { | 882 | * We make these constant because no one should touch them, |
882 | .size_bits = FTRACE_HASH_MAX_BITS, | 883 | * but they are used as the default "empty hash", to avoid allocating |
883 | .buckets = notrace_buckets, | 884 | * it all the time. These are in a read only section such that if |
884 | }; | 885 | * anyone does try to modify it, it will cause an exception. |
885 | 886 | */ | |
886 | static struct hlist_head filter_buckets[1 << FTRACE_HASH_MAX_BITS]; | 887 | static const struct hlist_head empty_buckets[1]; |
887 | static struct ftrace_hash filter_hash = { | 888 | static const struct ftrace_hash empty_hash = { |
888 | .size_bits = FTRACE_HASH_MAX_BITS, | 889 | .buckets = (struct hlist_head *)empty_buckets, |
889 | .buckets = filter_buckets, | ||
890 | }; | 890 | }; |
891 | #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) | ||
891 | 892 | ||
892 | struct ftrace_ops global_ops = { | 893 | struct ftrace_ops global_ops = { |
893 | .func = ftrace_stub, | 894 | .func = ftrace_stub, |
894 | .notrace_hash = ¬race_hash, | 895 | .notrace_hash = EMPTY_HASH, |
895 | .filter_hash = &filter_hash, | 896 | .filter_hash = EMPTY_HASH, |
896 | }; | 897 | }; |
897 | 898 | ||
898 | static struct dyn_ftrace *ftrace_new_addrs; | 899 | static struct dyn_ftrace *ftrace_new_addrs; |
@@ -941,31 +942,38 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) | |||
941 | return NULL; | 942 | return NULL; |
942 | } | 943 | } |
943 | 944 | ||
944 | static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip) | 945 | static void __add_hash_entry(struct ftrace_hash *hash, |
946 | struct ftrace_func_entry *entry) | ||
945 | { | 947 | { |
946 | struct ftrace_func_entry *entry; | ||
947 | struct hlist_head *hhd; | 948 | struct hlist_head *hhd; |
948 | unsigned long key; | 949 | unsigned long key; |
949 | 950 | ||
950 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | ||
951 | if (!entry) | ||
952 | return -ENOMEM; | ||
953 | |||
954 | if (hash->size_bits) | 951 | if (hash->size_bits) |
955 | key = hash_long(ip, hash->size_bits); | 952 | key = hash_long(entry->ip, hash->size_bits); |
956 | else | 953 | else |
957 | key = 0; | 954 | key = 0; |
958 | 955 | ||
959 | entry->ip = ip; | ||
960 | hhd = &hash->buckets[key]; | 956 | hhd = &hash->buckets[key]; |
961 | hlist_add_head(&entry->hlist, hhd); | 957 | hlist_add_head(&entry->hlist, hhd); |
962 | hash->count++; | 958 | hash->count++; |
959 | } | ||
960 | |||
961 | static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip) | ||
962 | { | ||
963 | struct ftrace_func_entry *entry; | ||
964 | |||
965 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | ||
966 | if (!entry) | ||
967 | return -ENOMEM; | ||
968 | |||
969 | entry->ip = ip; | ||
970 | __add_hash_entry(hash, entry); | ||
963 | 971 | ||
964 | return 0; | 972 | return 0; |
965 | } | 973 | } |
966 | 974 | ||
967 | static void | 975 | static void |
968 | remove_hash_entry(struct ftrace_hash *hash, | 976 | free_hash_entry(struct ftrace_hash *hash, |
969 | struct ftrace_func_entry *entry) | 977 | struct ftrace_func_entry *entry) |
970 | { | 978 | { |
971 | hlist_del(&entry->hlist); | 979 | hlist_del(&entry->hlist); |
@@ -973,6 +981,14 @@ remove_hash_entry(struct ftrace_hash *hash, | |||
973 | hash->count--; | 981 | hash->count--; |
974 | } | 982 | } |
975 | 983 | ||
984 | static void | ||
985 | remove_hash_entry(struct ftrace_hash *hash, | ||
986 | struct ftrace_func_entry *entry) | ||
987 | { | ||
988 | hlist_del(&entry->hlist); | ||
989 | hash->count--; | ||
990 | } | ||
991 | |||
976 | static void ftrace_hash_clear(struct ftrace_hash *hash) | 992 | static void ftrace_hash_clear(struct ftrace_hash *hash) |
977 | { | 993 | { |
978 | struct hlist_head *hhd; | 994 | struct hlist_head *hhd; |
@@ -981,14 +997,156 @@ static void ftrace_hash_clear(struct ftrace_hash *hash) | |||
981 | int size = 1 << hash->size_bits; | 997 | int size = 1 << hash->size_bits; |
982 | int i; | 998 | int i; |
983 | 999 | ||
1000 | if (!hash->count) | ||
1001 | return; | ||
1002 | |||
984 | for (i = 0; i < size; i++) { | 1003 | for (i = 0; i < size; i++) { |
985 | hhd = &hash->buckets[i]; | 1004 | hhd = &hash->buckets[i]; |
986 | hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) | 1005 | hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) |
987 | remove_hash_entry(hash, entry); | 1006 | free_hash_entry(hash, entry); |
988 | } | 1007 | } |
989 | FTRACE_WARN_ON(hash->count); | 1008 | FTRACE_WARN_ON(hash->count); |
990 | } | 1009 | } |
991 | 1010 | ||
1011 | static void free_ftrace_hash(struct ftrace_hash *hash) | ||
1012 | { | ||
1013 | if (!hash || hash == EMPTY_HASH) | ||
1014 | return; | ||
1015 | ftrace_hash_clear(hash); | ||
1016 | kfree(hash->buckets); | ||
1017 | kfree(hash); | ||
1018 | } | ||
1019 | |||
1020 | static struct ftrace_hash *alloc_ftrace_hash(int size_bits) | ||
1021 | { | ||
1022 | struct ftrace_hash *hash; | ||
1023 | int size; | ||
1024 | |||
1025 | hash = kzalloc(sizeof(*hash), GFP_KERNEL); | ||
1026 | if (!hash) | ||
1027 | return NULL; | ||
1028 | |||
1029 | size = 1 << size_bits; | ||
1030 | hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL); | ||
1031 | |||
1032 | if (!hash->buckets) { | ||
1033 | kfree(hash); | ||
1034 | return NULL; | ||
1035 | } | ||
1036 | |||
1037 | hash->size_bits = size_bits; | ||
1038 | |||
1039 | return hash; | ||
1040 | } | ||
1041 | |||
1042 | static struct ftrace_hash * | ||
1043 | alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) | ||
1044 | { | ||
1045 | struct ftrace_func_entry *entry; | ||
1046 | struct ftrace_hash *new_hash; | ||
1047 | struct hlist_node *tp; | ||
1048 | int size; | ||
1049 | int ret; | ||
1050 | int i; | ||
1051 | |||
1052 | new_hash = alloc_ftrace_hash(size_bits); | ||
1053 | if (!new_hash) | ||
1054 | return NULL; | ||
1055 | |||
1056 | /* Empty hash? */ | ||
1057 | if (!hash || !hash->count) | ||
1058 | return new_hash; | ||
1059 | |||
1060 | size = 1 << hash->size_bits; | ||
1061 | for (i = 0; i < size; i++) { | ||
1062 | hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) { | ||
1063 | ret = add_hash_entry(new_hash, entry->ip); | ||
1064 | if (ret < 0) | ||
1065 | goto free_hash; | ||
1066 | } | ||
1067 | } | ||
1068 | |||
1069 | FTRACE_WARN_ON(new_hash->count != hash->count); | ||
1070 | |||
1071 | return new_hash; | ||
1072 | |||
1073 | free_hash: | ||
1074 | free_ftrace_hash(new_hash); | ||
1075 | return NULL; | ||
1076 | } | ||
1077 | |||
1078 | static int | ||
1079 | ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src) | ||
1080 | { | ||
1081 | struct ftrace_func_entry *entry; | ||
1082 | struct hlist_node *tp, *tn; | ||
1083 | struct hlist_head *hhd; | ||
1084 | struct ftrace_hash *hash = *dst; | ||
1085 | unsigned long key; | ||
1086 | int size = src->count; | ||
1087 | int bits = 0; | ||
1088 | int i; | ||
1089 | |||
1090 | /* | ||
1091 | * If the new source is empty, just free dst and assign it | ||
1092 | * the empty_hash. | ||
1093 | */ | ||
1094 | if (!src->count) { | ||
1095 | free_ftrace_hash(*dst); | ||
1096 | *dst = EMPTY_HASH; | ||
1097 | return 0; | ||
1098 | } | ||
1099 | |||
1100 | ftrace_hash_clear(hash); | ||
1101 | |||
1102 | /* | ||
1103 | * Make the hash size about 1/2 the # found | ||
1104 | */ | ||
1105 | for (size /= 2; size; size >>= 1) | ||
1106 | bits++; | ||
1107 | |||
1108 | /* Don't allocate too much */ | ||
1109 | if (bits > FTRACE_HASH_MAX_BITS) | ||
1110 | bits = FTRACE_HASH_MAX_BITS; | ||
1111 | |||
1112 | /* We can't modify the empty_hash */ | ||
1113 | if (hash == EMPTY_HASH) { | ||
1114 | /* Create a new hash */ | ||
1115 | *dst = alloc_ftrace_hash(bits); | ||
1116 | if (!*dst) { | ||
1117 | *dst = EMPTY_HASH; | ||
1118 | return -ENOMEM; | ||
1119 | } | ||
1120 | hash = *dst; | ||
1121 | } else { | ||
1122 | size = 1 << bits; | ||
1123 | |||
1124 | /* Use the old hash, but create new buckets */ | ||
1125 | hhd = kzalloc(sizeof(*hhd) * size, GFP_KERNEL); | ||
1126 | if (!hhd) | ||
1127 | return -ENOMEM; | ||
1128 | |||
1129 | kfree(hash->buckets); | ||
1130 | hash->buckets = hhd; | ||
1131 | hash->size_bits = bits; | ||
1132 | } | ||
1133 | |||
1134 | size = 1 << src->size_bits; | ||
1135 | for (i = 0; i < size; i++) { | ||
1136 | hhd = &src->buckets[i]; | ||
1137 | hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) { | ||
1138 | if (bits > 0) | ||
1139 | key = hash_long(entry->ip, bits); | ||
1140 | else | ||
1141 | key = 0; | ||
1142 | remove_hash_entry(src, entry); | ||
1143 | __add_hash_entry(hash, entry); | ||
1144 | } | ||
1145 | } | ||
1146 | |||
1147 | return 0; | ||
1148 | } | ||
1149 | |||
992 | /* | 1150 | /* |
993 | * This is a double for. Do not use 'break' to break out of the loop, | 1151 | * This is a double for. Do not use 'break' to break out of the loop, |
994 | * you must use a goto. | 1152 | * you must use a goto. |
@@ -1443,6 +1601,7 @@ struct ftrace_iterator { | |||
1443 | struct ftrace_func_probe *probe; | 1601 | struct ftrace_func_probe *probe; |
1444 | struct trace_parser parser; | 1602 | struct trace_parser parser; |
1445 | struct ftrace_hash *hash; | 1603 | struct ftrace_hash *hash; |
1604 | struct ftrace_ops *ops; | ||
1446 | int hidx; | 1605 | int hidx; |
1447 | int idx; | 1606 | int idx; |
1448 | unsigned flags; | 1607 | unsigned flags; |
@@ -1742,22 +1901,37 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, | |||
1742 | else | 1901 | else |
1743 | hash = ops->filter_hash; | 1902 | hash = ops->filter_hash; |
1744 | 1903 | ||
1745 | iter->hash = hash; | 1904 | iter->ops = ops; |
1905 | iter->flags = flag; | ||
1906 | |||
1907 | if (file->f_mode & FMODE_WRITE) { | ||
1908 | mutex_lock(&ftrace_lock); | ||
1909 | iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash); | ||
1910 | mutex_unlock(&ftrace_lock); | ||
1911 | |||
1912 | if (!iter->hash) { | ||
1913 | trace_parser_put(&iter->parser); | ||
1914 | kfree(iter); | ||
1915 | return -ENOMEM; | ||
1916 | } | ||
1917 | } | ||
1746 | 1918 | ||
1747 | mutex_lock(&ftrace_regex_lock); | 1919 | mutex_lock(&ftrace_regex_lock); |
1920 | |||
1748 | if ((file->f_mode & FMODE_WRITE) && | 1921 | if ((file->f_mode & FMODE_WRITE) && |
1749 | (file->f_flags & O_TRUNC)) | 1922 | (file->f_flags & O_TRUNC)) |
1750 | ftrace_filter_reset(hash); | 1923 | ftrace_filter_reset(iter->hash); |
1751 | 1924 | ||
1752 | if (file->f_mode & FMODE_READ) { | 1925 | if (file->f_mode & FMODE_READ) { |
1753 | iter->pg = ftrace_pages_start; | 1926 | iter->pg = ftrace_pages_start; |
1754 | iter->flags = flag; | ||
1755 | 1927 | ||
1756 | ret = seq_open(file, &show_ftrace_seq_ops); | 1928 | ret = seq_open(file, &show_ftrace_seq_ops); |
1757 | if (!ret) { | 1929 | if (!ret) { |
1758 | struct seq_file *m = file->private_data; | 1930 | struct seq_file *m = file->private_data; |
1759 | m->private = iter; | 1931 | m->private = iter; |
1760 | } else { | 1932 | } else { |
1933 | /* Failed */ | ||
1934 | free_ftrace_hash(iter->hash); | ||
1761 | trace_parser_put(&iter->parser); | 1935 | trace_parser_put(&iter->parser); |
1762 | kfree(iter); | 1936 | kfree(iter); |
1763 | } | 1937 | } |
@@ -1835,7 +2009,7 @@ enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not) | |||
1835 | if (!entry) | 2009 | if (!entry) |
1836 | return 0; | 2010 | return 0; |
1837 | 2011 | ||
1838 | remove_hash_entry(hash, entry); | 2012 | free_hash_entry(hash, entry); |
1839 | } else { | 2013 | } else { |
1840 | /* Do nothing if it exists */ | 2014 | /* Do nothing if it exists */ |
1841 | if (entry) | 2015 | if (entry) |
@@ -2259,19 +2433,13 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd) | |||
2259 | return ret; | 2433 | return ret; |
2260 | } | 2434 | } |
2261 | 2435 | ||
2262 | static int ftrace_process_regex(char *buff, int len, int enable) | 2436 | static int ftrace_process_regex(struct ftrace_hash *hash, |
2437 | char *buff, int len, int enable) | ||
2263 | { | 2438 | { |
2264 | char *func, *command, *next = buff; | 2439 | char *func, *command, *next = buff; |
2265 | struct ftrace_ops *ops = &global_ops; | ||
2266 | struct ftrace_func_command *p; | 2440 | struct ftrace_func_command *p; |
2267 | struct ftrace_hash *hash; | ||
2268 | int ret; | 2441 | int ret; |
2269 | 2442 | ||
2270 | if (enable) | ||
2271 | hash = ops->filter_hash; | ||
2272 | else | ||
2273 | hash = ops->notrace_hash; | ||
2274 | |||
2275 | func = strsep(&next, ":"); | 2443 | func = strsep(&next, ":"); |
2276 | 2444 | ||
2277 | if (!next) { | 2445 | if (!next) { |
@@ -2328,7 +2496,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, | |||
2328 | 2496 | ||
2329 | if (read >= 0 && trace_parser_loaded(parser) && | 2497 | if (read >= 0 && trace_parser_loaded(parser) && |
2330 | !trace_parser_cont(parser)) { | 2498 | !trace_parser_cont(parser)) { |
2331 | ret = ftrace_process_regex(parser->buffer, | 2499 | ret = ftrace_process_regex(iter->hash, parser->buffer, |
2332 | parser->idx, enable); | 2500 | parser->idx, enable); |
2333 | trace_parser_clear(parser); | 2501 | trace_parser_clear(parser); |
2334 | if (ret) | 2502 | if (ret) |
@@ -2356,26 +2524,40 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf, | |||
2356 | return ftrace_regex_write(file, ubuf, cnt, ppos, 0); | 2524 | return ftrace_regex_write(file, ubuf, cnt, ppos, 0); |
2357 | } | 2525 | } |
2358 | 2526 | ||
2359 | static void | 2527 | static int |
2360 | ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, | 2528 | ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, |
2361 | int reset, int enable) | 2529 | int reset, int enable) |
2362 | { | 2530 | { |
2531 | struct ftrace_hash **orig_hash; | ||
2363 | struct ftrace_hash *hash; | 2532 | struct ftrace_hash *hash; |
2533 | int ret; | ||
2364 | 2534 | ||
2365 | if (unlikely(ftrace_disabled)) | 2535 | if (unlikely(ftrace_disabled)) |
2366 | return; | 2536 | return -ENODEV; |
2367 | 2537 | ||
2368 | if (enable) | 2538 | if (enable) |
2369 | hash = ops->filter_hash; | 2539 | orig_hash = &ops->filter_hash; |
2370 | else | 2540 | else |
2371 | hash = ops->notrace_hash; | 2541 | orig_hash = &ops->notrace_hash; |
2542 | |||
2543 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); | ||
2544 | if (!hash) | ||
2545 | return -ENOMEM; | ||
2372 | 2546 | ||
2373 | mutex_lock(&ftrace_regex_lock); | 2547 | mutex_lock(&ftrace_regex_lock); |
2374 | if (reset) | 2548 | if (reset) |
2375 | ftrace_filter_reset(hash); | 2549 | ftrace_filter_reset(hash); |
2376 | if (buf) | 2550 | if (buf) |
2377 | ftrace_match_records(hash, buf, len); | 2551 | ftrace_match_records(hash, buf, len); |
2552 | |||
2553 | mutex_lock(&ftrace_lock); | ||
2554 | ret = ftrace_hash_move(orig_hash, hash); | ||
2555 | mutex_unlock(&ftrace_lock); | ||
2556 | |||
2378 | mutex_unlock(&ftrace_regex_lock); | 2557 | mutex_unlock(&ftrace_regex_lock); |
2558 | |||
2559 | free_ftrace_hash(hash); | ||
2560 | return ret; | ||
2379 | } | 2561 | } |
2380 | 2562 | ||
2381 | /** | 2563 | /** |
@@ -2484,7 +2666,9 @@ ftrace_regex_release(struct inode *inode, struct file *file) | |||
2484 | { | 2666 | { |
2485 | struct seq_file *m = (struct seq_file *)file->private_data; | 2667 | struct seq_file *m = (struct seq_file *)file->private_data; |
2486 | struct ftrace_iterator *iter; | 2668 | struct ftrace_iterator *iter; |
2669 | struct ftrace_hash **orig_hash; | ||
2487 | struct trace_parser *parser; | 2670 | struct trace_parser *parser; |
2671 | int ret; | ||
2488 | 2672 | ||
2489 | mutex_lock(&ftrace_regex_lock); | 2673 | mutex_lock(&ftrace_regex_lock); |
2490 | if (file->f_mode & FMODE_READ) { | 2674 | if (file->f_mode & FMODE_READ) { |
@@ -2501,14 +2685,21 @@ ftrace_regex_release(struct inode *inode, struct file *file) | |||
2501 | } | 2685 | } |
2502 | 2686 | ||
2503 | trace_parser_put(parser); | 2687 | trace_parser_put(parser); |
2504 | kfree(iter); | ||
2505 | 2688 | ||
2506 | if (file->f_mode & FMODE_WRITE) { | 2689 | if (file->f_mode & FMODE_WRITE) { |
2690 | if (iter->flags & FTRACE_ITER_NOTRACE) | ||
2691 | orig_hash = &iter->ops->notrace_hash; | ||
2692 | else | ||
2693 | orig_hash = &iter->ops->filter_hash; | ||
2694 | |||
2507 | mutex_lock(&ftrace_lock); | 2695 | mutex_lock(&ftrace_lock); |
2508 | if (ftrace_start_up && ftrace_enabled) | 2696 | ret = ftrace_hash_move(orig_hash, iter->hash); |
2697 | if (!ret && ftrace_start_up && ftrace_enabled) | ||
2509 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); | 2698 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); |
2510 | mutex_unlock(&ftrace_lock); | 2699 | mutex_unlock(&ftrace_lock); |
2511 | } | 2700 | } |
2701 | free_ftrace_hash(iter->hash); | ||
2702 | kfree(iter); | ||
2512 | 2703 | ||
2513 | mutex_unlock(&ftrace_regex_lock); | 2704 | mutex_unlock(&ftrace_regex_lock); |
2514 | return 0; | 2705 | return 0; |