path: root/kernel/trace
author	Steven Rostedt <srostedt@redhat.com>	2011-05-04 09:27:52 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2011-05-18 15:29:50 -0400
commit	b848914ce39589d89ee0078a6d1ef452b464729e (patch)
tree	542bf09ae3c2d9118833132621585fb458e2a003 /kernel/trace
parent	07fd5515f3b5c20704707f63e7f4485b534508a8 (diff)
ftrace: Implement separate user function filtering
ftrace_ops that are registered to trace functions can now be
agnostic to each other in respect to the functions they trace.
Each ops has its own hash of the functions it wants to trace and a
hash of the functions it does not want to trace. An empty hash for
the functions to trace means that all functions should be traced
that are not in the notrace hash.

Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
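The match rule this commit introduces (implemented by ftrace_ops_test() in
the diff below) can be distilled into a standalone sketch. This is
illustrative only, not code from the patch: toy_hash and hash_has() are
stand-ins for struct ftrace_hash and ftrace_lookup_ip(); only the boolean
condition in ops_test() mirrors the kernel code.

/*
 * Standalone sketch of the per-ops match rule: call ops->func for ip
 * only when the filter hash is missing, empty, or contains ip, AND
 * the notrace hash does not contain ip.
 */
#include <stddef.h>
#include <stdio.h>

struct toy_hash {
	const unsigned long *ips;	/* traced/excluded instruction pointers */
	size_t count;
};

/* linear lookup; the kernel uses a real hash table */
static int hash_has(const struct toy_hash *h, unsigned long ip)
{
	size_t i;

	for (i = 0; h && i < h->count; i++)
		if (h->ips[i] == ip)
			return 1;
	return 0;
}

/* mirrors the condition tested in ftrace_ops_test() */
static int ops_test(const struct toy_hash *filter,
		    const struct toy_hash *notrace, unsigned long ip)
{
	return (!filter || !filter->count || hash_has(filter, ip)) &&
	       !hash_has(notrace, ip);
}

int main(void)
{
	const unsigned long want[] = { 0x1000, 0x2000 };
	const unsigned long skip[] = { 0x2000 };
	const struct toy_hash filter = { want, 2 };
	const struct toy_hash notrace = { skip, 1 };

	printf("%d\n", ops_test(&filter, &notrace, 0x1000)); /* 1: in filter */
	printf("%d\n", ops_test(&filter, &notrace, 0x2000)); /* 0: notrace wins */
	printf("%d\n", ops_test(NULL, &notrace, 0x3000));    /* 1: empty filter = all */
	return 0;
}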
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ftrace.c			193
-rw-r--r--	kernel/trace/trace_functions.c		2
-rw-r--r--	kernel/trace/trace_irqsoff.c		1
-rw-r--r--	kernel/trace/trace_sched_wakeup.c	1
-rw-r--r--	kernel/trace/trace_stack.c		1
5 files changed, 160 insertions(+), 38 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 92b6fdf49ae5..6c7e1df39b57 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -87,24 +87,29 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
 	.func = ftrace_stub,
 };
 
-static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
+static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
+static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 
+static void
+ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
+
 /*
- * Traverse the ftrace_list, invoking all entries. The reason that we
+ * Traverse the ftrace_global_list, invoking all entries. The reason that we
  * can use rcu_dereference_raw() is that elements removed from this list
  * are simply leaked, so there is no need to interact with a grace-period
  * mechanism. The rcu_dereference_raw() calls are needed to handle
- * concurrent insertions into the ftrace_list.
+ * concurrent insertions into the ftrace_global_list.
  *
  * Silly Alpha and silly pointer-speculation compiler optimizations!
  */
-static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
+static void ftrace_global_list_func(unsigned long ip,
+				    unsigned long parent_ip)
 {
-	struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/
+	struct ftrace_ops *op = rcu_dereference_raw(ftrace_global_list); /*see above*/
 
 	while (op != &ftrace_list_end) {
 		op->func(ip, parent_ip);
@@ -163,11 +168,11 @@ static void update_global_ops(void)
 	 * function directly. Otherwise, we need to iterate over the
 	 * registered callers.
 	 */
-	if (ftrace_list == &ftrace_list_end ||
-	    ftrace_list->next == &ftrace_list_end)
-		func = ftrace_list->func;
+	if (ftrace_global_list == &ftrace_list_end ||
+	    ftrace_global_list->next == &ftrace_list_end)
+		func = ftrace_global_list->func;
 	else
-		func = ftrace_list_func;
+		func = ftrace_global_list_func;
 
 	/* If we filter on pids, update to use the pid function */
 	if (!list_empty(&ftrace_pids)) {
@@ -184,7 +189,11 @@ static void update_ftrace_function(void)
 
 	update_global_ops();
 
-	func = global_ops.func;
+	if (ftrace_ops_list == &ftrace_list_end ||
+	    ftrace_ops_list->next == &ftrace_list_end)
+		func = ftrace_ops_list->func;
+	else
+		func = ftrace_ops_list_func;
 
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	ftrace_trace_function = func;
@@ -198,10 +207,10 @@ static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 {
 	ops->next = *list;
 	/*
-	 * We are entering ops into the ftrace_list but another
+	 * We are entering ops into the list but another
 	 * CPU might be walking that list. We need to make sure
 	 * the ops->next pointer is valid before another CPU sees
-	 * the ops pointer included into the ftrace_list.
+	 * the ops pointer included into the list.
 	 */
 	rcu_assign_pointer(*list, ops);
 }
@@ -238,7 +247,18 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if (FTRACE_WARN_ON(ops == &global_ops))
 		return -EINVAL;
 
-	add_ftrace_ops(&ftrace_list, ops);
+	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
+		return -EBUSY;
+
+	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
+		int first = ftrace_global_list == &ftrace_list_end;
+		add_ftrace_ops(&ftrace_global_list, ops);
+		ops->flags |= FTRACE_OPS_FL_ENABLED;
+		if (first)
+			add_ftrace_ops(&ftrace_ops_list, &global_ops);
+	} else
+		add_ftrace_ops(&ftrace_ops_list, ops);
+
 	if (ftrace_enabled)
 		update_ftrace_function();
 
@@ -252,12 +272,24 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (ftrace_disabled)
 		return -ENODEV;
 
+	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
+		return -EBUSY;
+
 	if (FTRACE_WARN_ON(ops == &global_ops))
 		return -EINVAL;
 
-	ret = remove_ftrace_ops(&ftrace_list, ops);
+	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
+		ret = remove_ftrace_ops(&ftrace_global_list, ops);
+		if (!ret && ftrace_global_list == &ftrace_list_end)
+			ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
+		if (!ret)
+			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+	} else
+		ret = remove_ftrace_ops(&ftrace_ops_list, ops);
+
 	if (ret < 0)
 		return ret;
+
 	if (ftrace_enabled)
 		update_ftrace_function();
 
@@ -928,10 +960,6 @@ static const struct ftrace_hash empty_hash = {
 };
 #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
 
-enum {
-	FTRACE_OPS_FL_ENABLED = 1,
-};
-
 static struct ftrace_ops global_ops = {
 	.func = ftrace_stub,
 	.notrace_hash = EMPTY_HASH,
@@ -1190,6 +1218,40 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 }
 
 /*
+ * Test the hashes for this ops to see if we want to call
+ * the ops->func or not.
+ *
+ * It's a match if the ip is in the ops->filter_hash or
+ * the filter_hash does not exist or is empty,
+ *  AND
+ * the ip is not in the ops->notrace_hash.
+ */
+static int
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+{
+	struct ftrace_hash *filter_hash;
+	struct ftrace_hash *notrace_hash;
+	int ret;
+
+	/* The hashes are freed with call_rcu_sched() */
+	preempt_disable_notrace();
+
+	filter_hash = rcu_dereference_raw(ops->filter_hash);
+	notrace_hash = rcu_dereference_raw(ops->notrace_hash);
+
+	if ((!filter_hash || !filter_hash->count ||
+	     ftrace_lookup_ip(filter_hash, ip)) &&
+	    (!notrace_hash || !notrace_hash->count ||
+	     !ftrace_lookup_ip(notrace_hash, ip)))
+		ret = 1;
+	else
+		ret = 0;
+	preempt_enable_notrace();
+
+	return ret;
+}
+
+/*
  * This is a double for. Do not use 'break' to break out of the loop,
  * you must use a goto.
  */
@@ -1232,7 +1294,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 	if (filter_hash) {
 		hash = ops->filter_hash;
 		other_hash = ops->notrace_hash;
-		if (!hash->count)
+		if (!hash || !hash->count)
 			all = 1;
 	} else {
 		inc = !inc;
@@ -1242,7 +1304,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 		 * If the notrace hash has no items,
 		 * then there's nothing to do.
 		 */
-		if (!hash->count)
+		if (hash && !hash->count)
 			return;
 	}
 
@@ -1256,11 +1318,11 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 			 * Only the filter_hash affects all records.
 			 * Update if the record is not in the notrace hash.
 			 */
-			if (!ftrace_lookup_ip(other_hash, rec->ip))
+			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
 				match = 1;
 		} else {
-			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
-			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
+			in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
+			in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);
 
 			/*
 			 *
@@ -1546,6 +1608,7 @@ static void ftrace_run_update_code(int command)
 
 static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
+static int global_start_up;
 
 static void ftrace_startup_enable(int command)
 {
@@ -1562,14 +1625,25 @@ static void ftrace_startup_enable(int command)
 
 static void ftrace_startup(struct ftrace_ops *ops, int command)
 {
+	bool hash_enable = true;
+
 	if (unlikely(ftrace_disabled))
 		return;
 
 	ftrace_start_up++;
 	command |= FTRACE_ENABLE_CALLS;
 
+	/* ops marked global share the filter hashes */
+	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
+		ops = &global_ops;
+		/* Don't update hash if global is already set */
+		if (global_start_up)
+			hash_enable = false;
+		global_start_up++;
+	}
+
 	ops->flags |= FTRACE_OPS_FL_ENABLED;
-	if (ftrace_start_up == 1)
+	if (hash_enable)
 		ftrace_hash_rec_enable(ops, 1);
 
 	ftrace_startup_enable(command);
@@ -1577,6 +1651,8 @@ static void ftrace_startup(struct ftrace_ops *ops, int command)
 
 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
 {
+	bool hash_disable = true;
+
 	if (unlikely(ftrace_disabled))
 		return;
 
@@ -1588,13 +1664,25 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
 	 */
 	WARN_ON_ONCE(ftrace_start_up < 0);
 
-	if (!ftrace_start_up)
+	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
+		ops = &global_ops;
+		global_start_up--;
+		WARN_ON_ONCE(global_start_up < 0);
+		/* Don't update hash if global still has users */
+		if (global_start_up) {
+			WARN_ON_ONCE(!ftrace_start_up);
+			hash_disable = false;
+		}
+	}
+
+	if (hash_disable)
 		ftrace_hash_rec_disable(ops, 1);
 
-	if (!ftrace_start_up) {
-		command |= FTRACE_DISABLE_CALLS;
+	if (ops != &global_ops || !global_start_up)
 		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
-	}
+
+	if (!ftrace_start_up)
+		command |= FTRACE_DISABLE_CALLS;
 
 	if (saved_ftrace_func != ftrace_trace_function) {
 		saved_ftrace_func = ftrace_trace_function;
@@ -2381,6 +2469,7 @@ static int ftrace_probe_registered;
 
 static void __enable_ftrace_function_probe(void)
 {
+	int ret;
 	int i;
 
 	if (ftrace_probe_registered)
@@ -2395,13 +2484,16 @@ static void __enable_ftrace_function_probe(void)
 	if (i == FTRACE_FUNC_HASHSIZE)
 		return;
 
-	__register_ftrace_function(&trace_probe_ops);
-	ftrace_startup(&global_ops, 0);
+	ret = __register_ftrace_function(&trace_probe_ops);
+	if (!ret)
+		ftrace_startup(&trace_probe_ops, 0);
+
 	ftrace_probe_registered = 1;
 }
 
 static void __disable_ftrace_function_probe(void)
 {
+	int ret;
 	int i;
 
 	if (!ftrace_probe_registered)
@@ -2414,8 +2506,10 @@ static void __disable_ftrace_function_probe(void)
 	}
 
 	/* no more funcs left */
-	__unregister_ftrace_function(&trace_probe_ops);
-	ftrace_shutdown(&global_ops, 0);
+	ret = __unregister_ftrace_function(&trace_probe_ops);
+	if (!ret)
+		ftrace_shutdown(&trace_probe_ops, 0);
+
 	ftrace_probe_registered = 0;
 }
 
@@ -3319,8 +3413,28 @@ static inline void ftrace_startup_enable(int command) { }
 # define ftrace_shutdown(ops, command)	do { } while (0)
 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)
+
+static inline int
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+{
+	return 1;
+}
+
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+static void
+ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
+{
+	/* see comment above ftrace_global_list_func */
+	struct ftrace_ops *op = rcu_dereference_raw(ftrace_ops_list);
+
+	while (op != &ftrace_list_end) {
+		if (ftrace_ops_test(op, ip))
+			op->func(ip, parent_ip);
+		op = rcu_dereference_raw(op->next);
+	};
+}
+
 static void clear_ftrace_swapper(void)
 {
 	struct task_struct *p;
@@ -3621,7 +3735,9 @@ int register_ftrace_function(struct ftrace_ops *ops)
 		goto out_unlock;
 
 	ret = __register_ftrace_function(ops);
-	ftrace_startup(&global_ops, 0);
+	if (!ret)
+		ftrace_startup(ops, 0);
+
 
  out_unlock:
 	mutex_unlock(&ftrace_lock);
@@ -3640,7 +3756,8 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
 	mutex_lock(&ftrace_lock);
 	ret = __unregister_ftrace_function(ops);
-	ftrace_shutdown(&global_ops, 0);
+	if (!ret)
+		ftrace_shutdown(ops, 0);
 	mutex_unlock(&ftrace_lock);
 
 	return ret;
@@ -3670,11 +3787,11 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 		ftrace_startup_sysctl();
 
 		/* we are starting ftrace again */
-		if (ftrace_list != &ftrace_list_end) {
-			if (ftrace_list->next == &ftrace_list_end)
-				ftrace_trace_function = ftrace_list->func;
+		if (ftrace_ops_list != &ftrace_list_end) {
+			if (ftrace_ops_list->next == &ftrace_list_end)
+				ftrace_trace_function = ftrace_ops_list->func;
 			else
-				ftrace_trace_function = ftrace_list_func;
+				ftrace_trace_function = ftrace_ops_list_func;
 		}
 
 	} else {
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 16aee4d44e8f..8d0e1cc4e974 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -149,11 +149,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = function_trace_call,
+	.flags = FTRACE_OPS_FL_GLOBAL,
 };
 
 static struct ftrace_ops trace_stack_ops __read_mostly =
 {
 	.func = function_stack_trace_call,
+	.flags = FTRACE_OPS_FL_GLOBAL,
 };
 
 /* Our two options */
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index a4969b47afc1..c77424be284d 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -153,6 +153,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = irqsoff_tracer_call,
+	.flags = FTRACE_OPS_FL_GLOBAL,
 };
 #endif /* CONFIG_FUNCTION_TRACER */
 
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 7319559ed59f..f029dd4fd2ca 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -129,6 +129,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = wakeup_tracer_call,
+	.flags = FTRACE_OPS_FL_GLOBAL,
 };
 #endif /* CONFIG_FUNCTION_TRACER */
 
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 4c5dead0c239..b0b53b8e4c25 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -133,6 +133,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = stack_trace_call,
+	.flags = FTRACE_OPS_FL_GLOBAL,
 };
 
 static ssize_t
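
As a usage note, a caller after this change could look like the hypothetical
out-of-tree module sketched below; it is not part of the commit. An ops
registered without FTRACE_OPS_FL_GLOBAL lands on ftrace_ops_list and is
filtered by its own hashes; since both hashes start empty here, the callback
still sees every traced function. The names my_ops and my_trace_func are
invented; the two-argument callback signature is the one in effect as of
this commit.

#include <linux/ftrace.h>
#include <linux/module.h>

/* invented example callback */
static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* runs for every ip that passes this ops' hash test */
}

static struct ftrace_ops my_ops __read_mostly = {
	.func = my_trace_func,
	/* no FTRACE_OPS_FL_GLOBAL: private filter/notrace hashes */
};

static int __init my_trace_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_trace_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_trace_init);
module_exit(my_trace_exit);
MODULE_LICENSE("GPL");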