author	Ingo Molnar <mingo@elte.hu>	2008-05-12 15:20:51 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-23 14:58:28 -0400
commit	e309b41dd65aa953f86765eeeecc941d8e1e8b8f (patch)
tree	295d4ed6e2a766607f889a04b977ca27cc24929e /kernel
parent	b53dde9d34f2df396540988ebc65c33400f57b04 (diff)
ftrace: remove notrace
now that we have a kbuild method for notrace, no need to pollute the
C code with the annotations.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
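The kbuild method referred to above is the per-directory compiler-flag override that the parent commit added to kernel/trace/Makefile: gcc only emits the mcount calls that ftrace hooks when it is invoked with -pg, so stripping -pg for the tracer's own directory makes every function there untraceable at compile time, and the per-function annotations become redundant. A minimal sketch of that Makefile fragment, assuming the -pg based mcount mechanism of this era:

    ifdef CONFIG_FTRACE
    # Do not trace the tracer itself: an mcount call compiled into these
    # files would recurse back into ftrace on every function entry.
    ORIG_CFLAGS := $(KBUILD_CFLAGS)
    KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
    endif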
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/ftrace.c	66
-rw-r--r--	kernel/trace/trace.c	94
-rw-r--r--	kernel/trace/trace.h	6
-rw-r--r--	kernel/trace/trace_functions.c	12
-rw-r--r--	kernel/trace/trace_irqsoff.c	40
-rw-r--r--	kernel/trace/trace_sched_switch.c	12
-rw-r--r--	kernel/trace/trace_sched_wakeup.c	28
-rw-r--r--	kernel/trace/trace_selftest.c	2
8 files changed, 130 insertions, 130 deletions
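For reference, the notrace annotation being removed throughout this patch is the kernel's per-function escape hatch from mcount instrumentation; in the headers of this period it was defined (for gcc) as __attribute__((no_instrument_function)). With the whole kernel/trace/ directory now built without -pg, the attribute has no effect there, which is why every use below can go.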
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 57350cbd1f61..281d97a3208c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -53,7 +53,7 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 /* mcount is defined per arch in assembly */
 EXPORT_SYMBOL(mcount);
 
-notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
+void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
 	struct ftrace_ops *op = ftrace_list;
 
@@ -79,7 +79,7 @@ void clear_ftrace_function(void)
 	ftrace_trace_function = ftrace_stub;
 }
 
-static int notrace __register_ftrace_function(struct ftrace_ops *ops)
+static int __register_ftrace_function(struct ftrace_ops *ops)
 {
 	/* Should never be called by interrupts */
 	spin_lock(&ftrace_lock);
@@ -110,7 +110,7 @@ static int notrace __register_ftrace_function(struct ftrace_ops *ops)
 	return 0;
 }
 
-static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
+static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
 	struct ftrace_ops **p;
 	int ret = 0;
@@ -197,7 +197,7 @@ static int ftrace_record_suspend;
 
 static struct dyn_ftrace *ftrace_free_records;
 
-static inline int notrace
+static inline int
 ftrace_ip_in_hash(unsigned long ip, unsigned long key)
 {
 	struct dyn_ftrace *p;
@@ -214,13 +214,13 @@ ftrace_ip_in_hash(unsigned long ip, unsigned long key)
 	return found;
 }
 
-static inline void notrace
+static inline void
 ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
 {
 	hlist_add_head(&node->node, &ftrace_hash[key]);
 }
 
-static notrace void ftrace_free_rec(struct dyn_ftrace *rec)
+static void ftrace_free_rec(struct dyn_ftrace *rec)
 {
 	/* no locking, only called from kstop_machine */
 
@@ -229,7 +229,7 @@ static notrace void ftrace_free_rec(struct dyn_ftrace *rec)
 	rec->flags |= FTRACE_FL_FREE;
 }
 
-static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
+static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
 {
 	struct dyn_ftrace *rec;
 
@@ -259,7 +259,7 @@ static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
 	return &ftrace_pages->records[ftrace_pages->index++];
 }
 
-static void notrace
+static void
 ftrace_record_ip(unsigned long ip)
 {
 	struct dyn_ftrace *node;
@@ -329,7 +329,7 @@ ftrace_record_ip(unsigned long ip)
 #define FTRACE_ADDR ((long)(ftrace_caller))
 #define MCOUNT_ADDR ((long)(mcount))
 
-static void notrace
+static void
 __ftrace_replace_code(struct dyn_ftrace *rec,
 		      unsigned char *old, unsigned char *new, int enable)
 {
@@ -405,7 +405,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec,
 	}
 }
 
-static void notrace ftrace_replace_code(int enable)
+static void ftrace_replace_code(int enable)
 {
 	unsigned char *new = NULL, *old = NULL;
 	struct dyn_ftrace *rec;
@@ -430,7 +430,7 @@ static void notrace ftrace_replace_code(int enable)
 	}
 }
 
-static notrace void ftrace_shutdown_replenish(void)
+static void ftrace_shutdown_replenish(void)
 {
 	if (ftrace_pages->next)
 		return;
@@ -439,7 +439,7 @@ static notrace void ftrace_shutdown_replenish(void)
 	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
 }
 
-static notrace void
+static void
 ftrace_code_disable(struct dyn_ftrace *rec)
 {
 	unsigned long ip;
@@ -458,7 +458,7 @@ ftrace_code_disable(struct dyn_ftrace *rec)
 	}
 }
 
-static int notrace __ftrace_modify_code(void *data)
+static int __ftrace_modify_code(void *data)
 {
 	unsigned long addr;
 	int *command = data;
@@ -482,14 +482,14 @@ static int notrace __ftrace_modify_code(void *data)
 	return 0;
 }
 
-static void notrace ftrace_run_update_code(int command)
+static void ftrace_run_update_code(int command)
 {
 	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
 }
 
 static ftrace_func_t saved_ftrace_func;
 
-static void notrace ftrace_startup(void)
+static void ftrace_startup(void)
 {
 	int command = 0;
 
@@ -514,7 +514,7 @@ static void notrace ftrace_startup(void)
 	mutex_unlock(&ftraced_lock);
 }
 
-static void notrace ftrace_shutdown(void)
+static void ftrace_shutdown(void)
 {
 	int command = 0;
 
@@ -539,7 +539,7 @@ static void notrace ftrace_shutdown(void)
 	mutex_unlock(&ftraced_lock);
 }
 
-static void notrace ftrace_startup_sysctl(void)
+static void ftrace_startup_sysctl(void)
 {
 	int command = FTRACE_ENABLE_MCOUNT;
 
@@ -557,7 +557,7 @@ static void notrace ftrace_startup_sysctl(void)
 	mutex_unlock(&ftraced_lock);
 }
 
-static void notrace ftrace_shutdown_sysctl(void)
+static void ftrace_shutdown_sysctl(void)
 {
 	int command = FTRACE_DISABLE_MCOUNT;
 
@@ -577,7 +577,7 @@ static cycle_t ftrace_update_time;
 static unsigned long ftrace_update_cnt;
 unsigned long ftrace_update_tot_cnt;
 
-static int notrace __ftrace_update_code(void *ignore)
+static int __ftrace_update_code(void *ignore)
 {
 	struct dyn_ftrace *p;
 	struct hlist_head head;
@@ -618,7 +618,7 @@ static int notrace __ftrace_update_code(void *ignore)
 	return 0;
 }
 
-static void notrace ftrace_update_code(void)
+static void ftrace_update_code(void)
 {
 	if (unlikely(ftrace_disabled))
 		return;
@@ -626,7 +626,7 @@ static void notrace ftrace_update_code(void)
 	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
 }
 
-static int notrace ftraced(void *ignore)
+static int ftraced(void *ignore)
 {
 	unsigned long usecs;
 
@@ -733,7 +733,7 @@ struct ftrace_iterator {
 	unsigned	filtered;
 };
 
-static void notrace *
+static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
@@ -806,7 +806,7 @@ static struct seq_operations show_ftrace_seq_ops = {
 	.show = t_show,
 };
 
-static int notrace
+static int
 ftrace_avail_open(struct inode *inode, struct file *file)
 {
 	struct ftrace_iterator *iter;
@@ -845,7 +845,7 @@ int ftrace_avail_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static void notrace ftrace_filter_reset(void)
+static void ftrace_filter_reset(void)
 {
 	struct ftrace_page *pg;
 	struct dyn_ftrace *rec;
@@ -867,7 +867,7 @@ static void notrace ftrace_filter_reset(void)
 	preempt_enable();
 }
 
-static int notrace
+static int
 ftrace_filter_open(struct inode *inode, struct file *file)
 {
 	struct ftrace_iterator *iter;
@@ -903,7 +903,7 @@ ftrace_filter_open(struct inode *inode, struct file *file)
 	return ret;
 }
 
-static ssize_t notrace
+static ssize_t
 ftrace_filter_read(struct file *file, char __user *ubuf,
 		   size_t cnt, loff_t *ppos)
 {
@@ -913,7 +913,7 @@ ftrace_filter_read(struct file *file, char __user *ubuf,
 	return -EPERM;
 }
 
-static loff_t notrace
+static loff_t
 ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
 {
 	loff_t ret;
@@ -933,7 +933,7 @@ enum {
 	MATCH_END_ONLY,
 };
 
-static void notrace
+static void
 ftrace_match(unsigned char *buff, int len)
 {
 	char str[KSYM_SYMBOL_LEN];
@@ -1002,7 +1002,7 @@ ftrace_match(unsigned char *buff, int len)
 	preempt_enable();
 }
 
-static ssize_t notrace
+static ssize_t
 ftrace_filter_write(struct file *file, const char __user *ubuf,
 		    size_t cnt, loff_t *ppos)
 {
@@ -1094,7 +1094,7 @@ ftrace_filter_write(struct file *file, const char __user *ubuf,
  * Filters denote which functions should be enabled when tracing is enabled.
  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
  */
-notrace void ftrace_set_filter(unsigned char *buf, int len, int reset)
+void ftrace_set_filter(unsigned char *buf, int len, int reset)
 {
 	if (unlikely(ftrace_disabled))
 		return;
@@ -1107,7 +1107,7 @@ notrace void ftrace_set_filter(unsigned char *buf, int len, int reset)
 	mutex_unlock(&ftrace_filter_lock);
 }
 
-static int notrace
+static int
 ftrace_filter_release(struct inode *inode, struct file *file)
 {
 	struct seq_file *m = (struct seq_file *)file->private_data;
@@ -1242,7 +1242,7 @@ static __init int ftrace_init_debugfs(void)
 
 fs_initcall(ftrace_init_debugfs);
 
-static int __init notrace ftrace_dynamic_init(void)
+static int __init ftrace_dynamic_init(void)
 {
 	struct task_struct *p;
 	unsigned long addr;
@@ -1352,7 +1352,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 	return ret;
 }
 
-notrace int
+int
 ftrace_enable_sysctl(struct ctl_table *table, int write,
 		     struct file *file, void __user *buffer, size_t *lenp,
 		     loff_t *ppos)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9022c357032a..f5898051fdd9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -35,7 +35,7 @@ unsigned long __read_mostly tracing_thresh;
 
 static int tracing_disabled = 1;
 
-static long notrace
+static long
 ns2usecs(cycle_t nsec)
 {
 	nsec += 500;
@@ -43,7 +43,7 @@ ns2usecs(cycle_t nsec)
 	return nsec;
 }
 
-notrace cycle_t ftrace_now(int cpu)
+cycle_t ftrace_now(int cpu)
 {
 	return cpu_clock(cpu);
 }
@@ -135,7 +135,7 @@ static DEFINE_SPINLOCK(ftrace_max_lock);
  * structure. (this way the maximum trace is permanently saved,
  * for later retrieval via /debugfs/tracing/latency_trace)
  */
-static notrace void
+static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	struct trace_array_cpu *data = tr->data[cpu];
@@ -184,7 +184,7 @@ void *head_page(struct trace_array_cpu *data)
 	return page_address(page);
 }
 
-static notrace int
+static int
 trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
 {
 	int len = (PAGE_SIZE - 1) - s->len;
@@ -207,7 +207,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
 	return len;
 }
 
-static notrace int
+static int
 trace_seq_puts(struct trace_seq *s, const char *str)
 {
 	int len = strlen(str);
@@ -221,7 +221,7 @@ trace_seq_puts(struct trace_seq *s, const char *str)
 	return len;
 }
 
-static notrace int
+static int
 trace_seq_putc(struct trace_seq *s, unsigned char c)
 {
 	if (s->len >= (PAGE_SIZE - 1))
@@ -232,7 +232,7 @@ trace_seq_putc(struct trace_seq *s, unsigned char c)
 	return 1;
 }
 
-static notrace int
+static int
 trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
 {
 	if (len > ((PAGE_SIZE - 1) - s->len))
@@ -246,7 +246,7 @@ trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
 
 #define HEX_CHARS 17
 
-static notrace int
+static int
 trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
 {
 	unsigned char hex[HEX_CHARS];
@@ -285,13 +285,13 @@ trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
 	return trace_seq_putmem(s, hex, j);
 }
 
-static notrace void
+static void
 trace_seq_reset(struct trace_seq *s)
 {
 	s->len = 0;
 }
 
-static notrace void
+static void
 trace_print_seq(struct seq_file *m, struct trace_seq *s)
 {
 	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
@@ -302,7 +302,7 @@ trace_print_seq(struct seq_file *m, struct trace_seq *s)
 	trace_seq_reset(s);
 }
 
-notrace static void
+static void
 flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
 {
 	struct list_head flip_pages;
@@ -323,7 +323,7 @@ flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
 	check_pages(tr2);
 }
 
-notrace void
+void
 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	struct trace_array_cpu *data;
@@ -348,7 +348,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
  * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 */
-notrace void
+void
 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	struct trace_array_cpu *data = tr->data[cpu];
@@ -471,7 +471,7 @@ void unregister_tracer(struct tracer *type)
 	mutex_unlock(&trace_types_lock);
 }
 
-notrace void tracing_reset(struct trace_array_cpu *data)
+void tracing_reset(struct trace_array_cpu *data)
 {
 	data->trace_idx = 0;
 	data->trace_head = data->trace_tail = head_page(data);
@@ -494,9 +494,9 @@ static void trace_init_cmdlines(void)
 	cmdline_idx = 0;
 }
 
-notrace void trace_stop_cmdline_recording(void);
+void trace_stop_cmdline_recording(void);
 
-static notrace void trace_save_cmdline(struct task_struct *tsk)
+static void trace_save_cmdline(struct task_struct *tsk)
 {
 	unsigned map;
 	unsigned idx;
@@ -531,7 +531,7 @@ static notrace void trace_save_cmdline(struct task_struct *tsk)
 	spin_unlock(&trace_cmdline_lock);
 }
 
-static notrace char *trace_find_cmdline(int pid)
+static char *trace_find_cmdline(int pid)
 {
 	char *cmdline = "<...>";
 	unsigned map;
@@ -552,7 +552,7 @@ static notrace char *trace_find_cmdline(int pid)
 	return cmdline;
 }
 
-notrace void tracing_record_cmdline(struct task_struct *tsk)
+void tracing_record_cmdline(struct task_struct *tsk)
 {
 	if (atomic_read(&trace_record_cmdline_disabled))
 		return;
@@ -560,7 +560,7 @@ notrace void tracing_record_cmdline(struct task_struct *tsk)
 	trace_save_cmdline(tsk);
 }
 
-static inline notrace struct list_head *
+static inline struct list_head *
 trace_next_list(struct trace_array_cpu *data, struct list_head *next)
 {
 	/*
@@ -574,7 +574,7 @@ trace_next_list(struct trace_array_cpu *data, struct list_head *next)
 	return next;
 }
 
-static inline notrace void *
+static inline void *
 trace_next_page(struct trace_array_cpu *data, void *addr)
 {
 	struct list_head *next;
@@ -588,7 +588,7 @@ trace_next_page(struct trace_array_cpu *data, void *addr)
 	return page_address(page);
 }
 
-static inline notrace struct trace_entry *
+static inline struct trace_entry *
 tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
 {
 	unsigned long idx, idx_next;
@@ -623,7 +623,7 @@ tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
 	return entry;
 }
 
-static inline notrace void
+static inline void
 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
 {
 	struct task_struct *tsk = current;
@@ -640,7 +640,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
 		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 }
 
-notrace void
+void
 trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 	       unsigned long ip, unsigned long parent_ip, unsigned long flags)
 {
@@ -659,7 +659,7 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 		wake_up(&trace_wait);
 }
 
-notrace void
+void
 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
        unsigned long ip, unsigned long parent_ip, unsigned long flags)
 {
@@ -667,7 +667,7 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 	trace_function(tr, data, ip, parent_ip, flags);
 }
 
-notrace void
+void
 trace_special(struct trace_array *tr, struct trace_array_cpu *data,
 	      unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
@@ -687,7 +687,7 @@ trace_special(struct trace_array *tr, struct trace_array_cpu *data,
 		wake_up(&trace_wait);
 }
 
-notrace void
+void
 tracing_sched_switch_trace(struct trace_array *tr,
 			   struct trace_array_cpu *data,
 			   struct task_struct *prev, struct task_struct *next,
@@ -712,7 +712,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 }
 
 #ifdef CONFIG_FTRACE
-static notrace void
+static void
 function_trace_call(unsigned long ip, unsigned long parent_ip)
 {
 	struct trace_array *tr = &global_trace;
@@ -741,12 +741,12 @@ static struct ftrace_ops trace_ops __read_mostly =
 	.func = function_trace_call,
 };
 
-notrace void tracing_start_function_trace(void)
+void tracing_start_function_trace(void)
 {
 	register_ftrace_function(&trace_ops);
 }
 
-notrace void tracing_stop_function_trace(void)
+void tracing_stop_function_trace(void)
 {
 	unregister_ftrace_function(&trace_ops);
 }
@@ -786,7 +786,7 @@ trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
 	return &array[iter->next_page_idx[cpu]];
 }
 
-static struct trace_entry * notrace
+static struct trace_entry *
 find_next_entry(struct trace_iterator *iter, int *ent_cpu)
 {
 	struct trace_array *tr = iter->tr;
@@ -813,7 +813,7 @@ find_next_entry(struct trace_iterator *iter, int *ent_cpu)
 	return next;
 }
 
-static notrace void trace_iterator_increment(struct trace_iterator *iter)
+static void trace_iterator_increment(struct trace_iterator *iter)
 {
 	iter->idx++;
 	iter->next_idx[iter->cpu]++;
@@ -828,7 +828,7 @@ static notrace void trace_iterator_increment(struct trace_iterator *iter)
 	}
 }
 
-static notrace void trace_consume(struct trace_iterator *iter)
+static void trace_consume(struct trace_iterator *iter)
 {
 	struct trace_array_cpu *data = iter->tr->data[iter->cpu];
 
@@ -844,7 +844,7 @@ static notrace void trace_consume(struct trace_iterator *iter)
 		data->trace_idx = 0;
 }
 
-static notrace void *find_next_entry_inc(struct trace_iterator *iter)
+static void *find_next_entry_inc(struct trace_iterator *iter)
 {
 	struct trace_entry *next;
 	int next_cpu = -1;
@@ -863,7 +863,7 @@ static notrace void *find_next_entry_inc(struct trace_iterator *iter)
 	return next ? iter : NULL;
 }
 
-static notrace void *s_next(struct seq_file *m, void *v, loff_t *pos)
+static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	struct trace_iterator *iter = m->private;
 	void *last_ent = iter->ent;
@@ -978,7 +978,7 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt,
 # define IP_FMT "%016lx"
 #endif
 
-static notrace int
+static int
 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
 {
 	int ret;
@@ -999,7 +999,7 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
 	return ret;
 }
 
-static notrace void print_lat_help_header(struct seq_file *m)
+static void print_lat_help_header(struct seq_file *m)
 {
 	seq_puts(m, "# _------=> CPU# \n");
 	seq_puts(m, "# / _-----=> irqs-off \n");
@@ -1012,14 +1012,14 @@ static notrace void print_lat_help_header(struct seq_file *m)
 	seq_puts(m, "# \\ / ||||| \\ | / \n");
 }
 
-static notrace void print_func_help_header(struct seq_file *m)
+static void print_func_help_header(struct seq_file *m)
 {
 	seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
 	seq_puts(m, "# | | | | |\n");
 }
 
 
-static notrace void
+static void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
@@ -1090,7 +1090,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 	seq_puts(m, "\n");
 }
 
-static notrace void
+static void
 lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
 {
 	int hardirq, softirq;
@@ -1127,7 +1127,7 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
 
 unsigned long preempt_mark_thresh = 100;
 
-static notrace void
+static void
 lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
 		    unsigned long rel_usecs)
 {
@@ -1142,7 +1142,7 @@ lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
 
 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
 
-static notrace int
+static int
 print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 {
 	struct trace_seq *s = &iter->seq;
@@ -1206,7 +1206,7 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 	return 1;
 }
 
-static notrace int print_trace_fmt(struct trace_iterator *iter)
+static int print_trace_fmt(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
@@ -1279,7 +1279,7 @@ static notrace int print_trace_fmt(struct trace_iterator *iter)
 	return 1;
 }
 
-static notrace int print_raw_fmt(struct trace_iterator *iter)
+static int print_raw_fmt(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
 	struct trace_entry *entry;
@@ -1336,7 +1336,7 @@ do { \
 		return 0; \
 } while (0)
 
-static notrace int print_hex_fmt(struct trace_iterator *iter)
+static int print_hex_fmt(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
 	unsigned char newline = '\n';
@@ -1375,7 +1375,7 @@ static notrace int print_hex_fmt(struct trace_iterator *iter)
 	return 1;
 }
 
-static notrace int print_bin_fmt(struct trace_iterator *iter)
+static int print_bin_fmt(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
 	struct trace_entry *entry;
@@ -1475,7 +1475,7 @@ static struct seq_operations tracer_seq_ops = {
 	.show = s_show,
 };
 
-static struct trace_iterator notrace *
+static struct trace_iterator *
 __tracing_open(struct inode *inode, struct file *file, int *ret)
 {
 	struct trace_iterator *iter;
@@ -1572,7 +1572,7 @@ static int tracing_lt_open(struct inode *inode, struct file *file)
 }
 
 
-static notrace void *
+static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	struct tracer *t = m->private;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index faf9f67246ac..2b7352bf1ce6 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -150,7 +150,7 @@ struct trace_iterator {
 	long	idx;
 };
 
-void notrace tracing_reset(struct trace_array_cpu *data);
+void tracing_reset(struct trace_array_cpu *data);
 int tracing_open_generic(struct inode *inode, struct file *filp);
 struct dentry *tracing_init_dentry(void);
 void ftrace(struct trace_array *tr,
@@ -189,10 +189,10 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 void update_max_tr_single(struct trace_array *tr,
 			  struct task_struct *tsk, int cpu);
 
-extern notrace cycle_t ftrace_now(int cpu);
+extern cycle_t ftrace_now(int cpu);
 
 #ifdef CONFIG_SCHED_TRACER
-extern void notrace
+extern void
 wakeup_sched_switch(struct task_struct *prev, struct task_struct *next);
 #else
 static inline void
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 69a0eb00a0a5..4165d34bd28a 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -16,7 +16,7 @@
 
 #include "trace.h"
 
-static notrace void function_reset(struct trace_array *tr)
+static void function_reset(struct trace_array *tr)
 {
 	int cpu;
 
@@ -26,30 +26,30 @@ static notrace void function_reset(struct trace_array *tr)
 		tracing_reset(tr->data[cpu]);
 }
 
-static notrace void start_function_trace(struct trace_array *tr)
+static void start_function_trace(struct trace_array *tr)
 {
 	function_reset(tr);
 	tracing_start_function_trace();
 }
 
-static notrace void stop_function_trace(struct trace_array *tr)
+static void stop_function_trace(struct trace_array *tr)
 {
 	tracing_stop_function_trace();
 }
 
-static notrace void function_trace_init(struct trace_array *tr)
+static void function_trace_init(struct trace_array *tr)
 {
 	if (tr->ctrl)
 		start_function_trace(tr);
 }
 
-static notrace void function_trace_reset(struct trace_array *tr)
+static void function_trace_reset(struct trace_array *tr)
 {
 	if (tr->ctrl)
 		stop_function_trace(tr);
 }
 
-static notrace void function_trace_ctrl_update(struct trace_array *tr)
+static void function_trace_ctrl_update(struct trace_array *tr)
 {
 	if (tr->ctrl)
 		start_function_trace(tr);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 2ac0d09db6fb..7a4dc014b8ab 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -33,7 +33,7 @@ enum {
 static int trace_type __read_mostly;
 
 #ifdef CONFIG_PREEMPT_TRACER
-static inline int notrace
+static inline int
 preempt_trace(void)
 {
 	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
@@ -43,7 +43,7 @@ preempt_trace(void)
 #endif
 
 #ifdef CONFIG_IRQSOFF_TRACER
-static inline int notrace
+static inline int
 irq_trace(void)
 {
 	return ((trace_type & TRACER_IRQS_OFF) &&
@@ -67,7 +67,7 @@ static __cacheline_aligned_in_smp unsigned long max_sequence;
 /*
  * irqsoff uses its own tracer function to keep the overhead down:
  */
-static void notrace
+static void
 irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
 {
 	struct trace_array *tr = irqsoff_trace;
@@ -109,7 +109,7 @@ static struct ftrace_ops trace_ops __read_mostly =
 /*
  * Should this new latency be reported/recorded?
  */
-static int notrace report_latency(cycle_t delta)
+static int report_latency(cycle_t delta)
 {
 	if (tracing_thresh) {
 		if (delta < tracing_thresh)
@@ -121,7 +121,7 @@ static int notrace report_latency(cycle_t delta)
 	return 1;
 }
 
-static void notrace
+static void
 check_critical_timing(struct trace_array *tr,
 		      struct trace_array_cpu *data,
 		      unsigned long parent_ip,
@@ -191,7 +191,7 @@ out:
 	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
 }
 
-static inline void notrace
+static inline void
 start_critical_timing(unsigned long ip, unsigned long parent_ip)
 {
 	int cpu;
@@ -228,7 +228,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 	atomic_dec(&data->disabled);
 }
 
-static inline void notrace
+static inline void
 stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 {
 	int cpu;
@@ -261,13 +261,13 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 }
 
 /* start and stop critical timings used to for stoppage (in idle) */
-void notrace start_critical_timings(void)
+void start_critical_timings(void)
 {
 	if (preempt_trace() || irq_trace())
 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 
-void notrace stop_critical_timings(void)
+void stop_critical_timings(void)
 {
 	if (preempt_trace() || irq_trace())
 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
@@ -275,13 +275,13 @@ void notrace stop_critical_timings(void)
 
 #ifdef CONFIG_IRQSOFF_TRACER
 #ifdef CONFIG_PROVE_LOCKING
-void notrace time_hardirqs_on(unsigned long a0, unsigned long a1)
+void time_hardirqs_on(unsigned long a0, unsigned long a1)
 {
 	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(a0, a1);
 }
 
-void notrace time_hardirqs_off(unsigned long a0, unsigned long a1)
+void time_hardirqs_off(unsigned long a0, unsigned long a1)
 {
 	if (!preempt_trace() && irq_trace())
 		start_critical_timing(a0, a1);
@@ -309,35 +309,35 @@ void trace_softirqs_off(unsigned long ip)
 {
 }
 
-inline notrace void print_irqtrace_events(struct task_struct *curr)
+inline void print_irqtrace_events(struct task_struct *curr)
 {
 }
 
 /*
  * We are only interested in hardirq on/off events:
  */
-void notrace trace_hardirqs_on(void)
+void trace_hardirqs_on(void)
 {
 	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 EXPORT_SYMBOL(trace_hardirqs_on);
 
-void notrace trace_hardirqs_off(void)
+void trace_hardirqs_off(void)
 {
 	if (!preempt_trace() && irq_trace())
 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
 
-void notrace trace_hardirqs_on_caller(unsigned long caller_addr)
+void trace_hardirqs_on_caller(unsigned long caller_addr)
 {
 	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(CALLER_ADDR0, caller_addr);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
-void notrace trace_hardirqs_off_caller(unsigned long caller_addr)
+void trace_hardirqs_off_caller(unsigned long caller_addr)
 {
 	if (!preempt_trace() && irq_trace())
 		start_critical_timing(CALLER_ADDR0, caller_addr);
@@ -348,12 +348,12 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
 #endif /* CONFIG_IRQSOFF_TRACER */
 
 #ifdef CONFIG_PREEMPT_TRACER
-void notrace trace_preempt_on(unsigned long a0, unsigned long a1)
+void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
 	stop_critical_timing(a0, a1);
 }
 
-void notrace trace_preempt_off(unsigned long a0, unsigned long a1)
+void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
 	start_critical_timing(a0, a1);
 }
@@ -395,14 +395,14 @@ static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
 		stop_irqsoff_tracer(tr);
 }
 
-static void notrace irqsoff_tracer_open(struct trace_iterator *iter)
+static void irqsoff_tracer_open(struct trace_iterator *iter)
 {
 	/* stop the trace while dumping */
 	if (iter->tr->ctrl)
 		stop_irqsoff_tracer(iter->tr);
 }
 
-static void notrace irqsoff_tracer_close(struct trace_iterator *iter)
+static void irqsoff_tracer_close(struct trace_iterator *iter)
 {
 	if (iter->tr->ctrl)
 		start_irqsoff_tracer(iter->tr);
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 8d656672da93..b738eaca1dbe 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -17,7 +17,7 @@
 static struct trace_array *ctx_trace;
 static int __read_mostly tracer_enabled;
 
-static void notrace
+static void
 ctx_switch_func(struct task_struct *prev, struct task_struct *next)
 {
 	struct trace_array *tr = ctx_trace;
@@ -57,7 +57,7 @@ void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
 	wakeup_sched_switch(prev, next);
 }
 
-static notrace void sched_switch_reset(struct trace_array *tr)
+static void sched_switch_reset(struct trace_array *tr)
 {
 	int cpu;
 
@@ -67,18 +67,18 @@ static notrace void sched_switch_reset(struct trace_array *tr)
 		tracing_reset(tr->data[cpu]);
 }
 
-static notrace void start_sched_trace(struct trace_array *tr)
+static void start_sched_trace(struct trace_array *tr)
 {
 	sched_switch_reset(tr);
 	tracer_enabled = 1;
 }
 
-static notrace void stop_sched_trace(struct trace_array *tr)
+static void stop_sched_trace(struct trace_array *tr)
 {
 	tracer_enabled = 0;
 }
 
-static notrace void sched_switch_trace_init(struct trace_array *tr)
+static void sched_switch_trace_init(struct trace_array *tr)
 {
 	ctx_trace = tr;
 
@@ -86,7 +86,7 @@ static notrace void sched_switch_trace_init(struct trace_array *tr)
 		start_sched_trace(tr);
 }
 
-static notrace void sched_switch_trace_reset(struct trace_array *tr)
+static void sched_switch_trace_reset(struct trace_array *tr)
 {
 	if (tr->ctrl)
 		stop_sched_trace(tr);
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 3549e4154f1f..662679c78b66 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -27,12 +27,12 @@ static unsigned wakeup_prio = -1;
 
 static DEFINE_SPINLOCK(wakeup_lock);
 
-static void notrace __wakeup_reset(struct trace_array *tr);
+static void __wakeup_reset(struct trace_array *tr);
 
 /*
  * Should this new latency be reported/recorded?
  */
-static int notrace report_latency(cycle_t delta)
+static int report_latency(cycle_t delta)
 {
 	if (tracing_thresh) {
 		if (delta < tracing_thresh)
@@ -44,7 +44,7 @@ static int notrace report_latency(cycle_t delta)
 	return 1;
 }
 
-void notrace
+void
 wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
 {
 	unsigned long latency = 0, t0 = 0, t1 = 0;
@@ -126,7 +126,7 @@ out:
 	atomic_dec(&tr->data[cpu]->disabled);
 }
 
-static void notrace __wakeup_reset(struct trace_array *tr)
+static void __wakeup_reset(struct trace_array *tr)
 {
 	struct trace_array_cpu *data;
 	int cpu;
@@ -147,7 +147,7 @@ static void notrace __wakeup_reset(struct trace_array *tr)
 	wakeup_task = NULL;
 }
 
-static void notrace wakeup_reset(struct trace_array *tr)
+static void wakeup_reset(struct trace_array *tr)
 {
 	unsigned long flags;
 
@@ -156,7 +156,7 @@ static void notrace wakeup_reset(struct trace_array *tr)
 	spin_unlock_irqrestore(&wakeup_lock, flags);
 }
 
-static notrace void
+static void
 wakeup_check_start(struct trace_array *tr, struct task_struct *p,
 		   struct task_struct *curr)
 {
@@ -201,7 +201,7 @@ out:
 	atomic_dec(&tr->data[cpu]->disabled);
 }
 
-notrace void
+void
 ftrace_wake_up_task(struct task_struct *wakee, struct task_struct *curr)
 {
 	if (likely(!tracer_enabled))
@@ -210,7 +210,7 @@ ftrace_wake_up_task(struct task_struct *wakee, struct task_struct *curr)
 	wakeup_check_start(wakeup_trace, wakee, curr);
 }
 
-notrace void
+void
 ftrace_wake_up_new_task(struct task_struct *wakee, struct task_struct *curr)
 {
 	if (likely(!tracer_enabled))
@@ -219,7 +219,7 @@ ftrace_wake_up_new_task(struct task_struct *wakee, struct task_struct *curr)
 	wakeup_check_start(wakeup_trace, wakee, curr);
 }
 
-static notrace void start_wakeup_tracer(struct trace_array *tr)
+static void start_wakeup_tracer(struct trace_array *tr)
 {
 	wakeup_reset(tr);
 
@@ -237,12 +237,12 @@ static notrace void start_wakeup_tracer(struct trace_array *tr)
 	return;
 }
 
-static notrace void stop_wakeup_tracer(struct trace_array *tr)
+static void stop_wakeup_tracer(struct trace_array *tr)
 {
 	tracer_enabled = 0;
 }
 
-static notrace void wakeup_tracer_init(struct trace_array *tr)
+static void wakeup_tracer_init(struct trace_array *tr)
 {
 	wakeup_trace = tr;
 
@@ -250,7 +250,7 @@ static notrace void wakeup_tracer_init(struct trace_array *tr)
 		start_wakeup_tracer(tr);
 }
 
-static notrace void wakeup_tracer_reset(struct trace_array *tr)
+static void wakeup_tracer_reset(struct trace_array *tr)
 {
 	if (tr->ctrl) {
 		stop_wakeup_tracer(tr);
@@ -267,14 +267,14 @@ static void wakeup_tracer_ctrl_update(struct trace_array *tr)
 		stop_wakeup_tracer(tr);
 }
 
-static void notrace wakeup_tracer_open(struct trace_iterator *iter)
+static void wakeup_tracer_open(struct trace_iterator *iter)
 {
 	/* stop the trace while dumping */
 	if (iter->tr->ctrl)
 		stop_wakeup_tracer(iter->tr);
 }
 
-static void notrace wakeup_tracer_close(struct trace_iterator *iter)
+static void wakeup_tracer_close(struct trace_iterator *iter)
 {
 	/* forget about any processes we were recording */
 	if (iter->tr->ctrl)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 546307de6e3d..85715b86a342 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -3,7 +3,7 @@
 #include <linux/kthread.h>
 #include <linux/delay.h>
 
-static notrace inline int trace_valid_entry(struct trace_entry *entry)
+static inline int trace_valid_entry(struct trace_entry *entry)
 {
 	switch (entry->type) {
 	case TRACE_FN: