author    Steven Rostedt <srostedt@redhat.com>    2008-11-12 15:24:24 -0500
committer Ingo Molnar <mingo@elte.hu>             2008-11-12 16:28:09 -0500
commit    9f029e83e968e5661d7be045bbcb620dbb909938
tree      13beecf2626162dc8a3912c79d58fc91e2193de5
parent    2ed84eeb8808cf3c9f039213ca137ffd7d753f0e
ftrace: rename unlikely iter_ctrl to branch
Impact: rename of iter_ctrl unlikely to branch

The unlikely name is ugly. This patch converts the iter_ctrl command
"unlikely" and "nounlikely" to "branch" and "nobranch" respectively.

It also renames a lot of internal functions to use "branch" instead
of "unlikely".

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
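For the user-visible side of the rename, a minimal usage sketch of the new iter_ctrl flag names (the debugfs mount point below is an assumption; adjust to wherever debugfs is mounted on your system):

	# enable branch-trace entries in the output (previously "unlikely")
	echo branch > /sys/kernel/debug/tracing/iter_ctrl
	# turn them back off (previously "nounlikely")
	echo nobranch > /sys/kernel/debug/tracing/iter_ctrl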
Diffstat (limited to 'kernel')
 kernel/trace/trace.c          |  14
 kernel/trace/trace.h          |  26
 kernel/trace/trace_unlikely.c |  50
 3 files changed, 45 insertions(+), 45 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index bad59d32a4a9..4bf070bb5272 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -259,7 +259,7 @@ static const char *trace_options[] = {
259 "ftrace_printk", 259 "ftrace_printk",
260 "ftrace_preempt", 260 "ftrace_preempt",
261#ifdef CONFIG_BRANCH_TRACER 261#ifdef CONFIG_BRANCH_TRACER
262 "unlikely", 262 "branch",
263#endif 263#endif
264 NULL 264 NULL
265}; 265};
@@ -1651,8 +1651,8 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 		trace_seq_print_cont(s, iter);
 		break;
 	}
-	case TRACE_UNLIKELY: {
-		struct trace_unlikely *field;
+	case TRACE_BRANCH: {
+		struct trace_branch *field;
 
 		trace_assign_type(field, entry);
 
@@ -1802,8 +1802,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 		return print_return_function(iter);
 		break;
 	}
-	case TRACE_UNLIKELY: {
-		struct trace_unlikely *field;
+	case TRACE_BRANCH: {
+		struct trace_branch *field;
 
 		trace_assign_type(field, entry);
 
@@ -2619,7 +2619,7 @@ static int tracing_set_tracer(char *buf)
 	if (t == current_trace)
 		goto out;
 
-	trace_unlikely_disable();
+	trace_branch_disable();
 	if (current_trace && current_trace->reset)
 		current_trace->reset(tr);
 
@@ -2627,7 +2627,7 @@ static int tracing_set_tracer(char *buf)
 	if (t->init)
 		t->init(tr);
 
-	trace_unlikely_enable(tr);
+	trace_branch_enable(tr);
  out:
 	mutex_unlock(&trace_types_lock);
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index dccae6312941..7fbf37b27453 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -22,7 +22,7 @@ enum trace_type {
 	TRACE_SPECIAL,
 	TRACE_MMIO_RW,
 	TRACE_MMIO_MAP,
-	TRACE_UNLIKELY,
+	TRACE_BRANCH,
 	TRACE_BOOT_CALL,
 	TRACE_BOOT_RET,
 	TRACE_FN_RET,
@@ -137,7 +137,7 @@ struct trace_boot_ret {
 
 #define TRACE_FUNC_SIZE 30
 #define TRACE_FILE_SIZE 20
-struct trace_unlikely {
+struct trace_branch {
 	struct trace_entry	ent;
 	unsigned		line;
 	char			func[TRACE_FUNC_SIZE+1];
@@ -247,7 +247,7 @@ extern void __ftrace_bad_type(void);
 			  TRACE_MMIO_MAP); \
 		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
 		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
-		IF_ASSIGN(var, ent, struct trace_unlikely, TRACE_UNLIKELY); \
+		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
 		IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\
 		__ftrace_bad_type(); \
 	} while (0)
@@ -469,7 +469,7 @@ enum trace_iterator_flags {
 	TRACE_ITER_PRINTK		= 0x400,
 	TRACE_ITER_PREEMPTONLY		= 0x800,
 #ifdef CONFIG_BRANCH_TRACER
-	TRACE_ITER_UNLIKELY		= 0x1000,
+	TRACE_ITER_BRANCH		= 0x1000,
 #endif
 };
 
@@ -531,25 +531,25 @@ static inline void ftrace_preempt_enable(int resched)
 }
 
 #ifdef CONFIG_BRANCH_TRACER
-extern int enable_unlikely_tracing(struct trace_array *tr);
-extern void disable_unlikely_tracing(void);
-static inline int trace_unlikely_enable(struct trace_array *tr)
+extern int enable_branch_tracing(struct trace_array *tr);
+extern void disable_branch_tracing(void);
+static inline int trace_branch_enable(struct trace_array *tr)
 {
-	if (trace_flags & TRACE_ITER_UNLIKELY)
-		return enable_unlikely_tracing(tr);
+	if (trace_flags & TRACE_ITER_BRANCH)
+		return enable_branch_tracing(tr);
 	return 0;
 }
-static inline void trace_unlikely_disable(void)
+static inline void trace_branch_disable(void)
 {
 	/* due to races, always disable */
-	disable_unlikely_tracing();
+	disable_branch_tracing();
 }
 #else
-static inline int trace_unlikely_enable(struct trace_array *tr)
+static inline int trace_branch_enable(struct trace_array *tr)
 {
 	return 0;
 }
-static inline void trace_unlikely_disable(void)
+static inline void trace_branch_disable(void)
 {
 }
 #endif /* CONFIG_BRANCH_TRACER */
diff --git a/kernel/trace/trace_unlikely.c b/kernel/trace/trace_unlikely.c
index 856eb3b7f694..e5d5969853a3 100644
--- a/kernel/trace/trace_unlikely.c
+++ b/kernel/trace/trace_unlikely.c
@@ -17,16 +17,16 @@
 
 #ifdef CONFIG_BRANCH_TRACER
 
-static int unlikely_tracing_enabled __read_mostly;
-static DEFINE_MUTEX(unlikely_tracing_mutex);
-static struct trace_array *unlikely_tracer;
+static int branch_tracing_enabled __read_mostly;
+static DEFINE_MUTEX(branch_tracing_mutex);
+static struct trace_array *branch_tracer;
 
 static void
-probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
+probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 {
-	struct trace_array *tr = unlikely_tracer;
+	struct trace_array *tr = branch_tracer;
 	struct ring_buffer_event *event;
-	struct trace_unlikely *entry;
+	struct trace_branch *entry;
 	unsigned long flags, irq_flags;
 	int cpu, pc;
 	const char *p;
@@ -54,7 +54,7 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 	pc = preempt_count();
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_UNLIKELY;
+	entry->ent.type = TRACE_BRANCH;
 
 	/* Strip off the path, only save the file */
 	p = f->file + strlen(f->file);
@@ -77,51 +77,51 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 }
 
 static inline
-void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
+void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 {
-	if (!unlikely_tracing_enabled)
+	if (!branch_tracing_enabled)
 		return;
 
 	probe_likely_condition(f, val, expect);
 }
 
-int enable_unlikely_tracing(struct trace_array *tr)
+int enable_branch_tracing(struct trace_array *tr)
 {
 	int ret = 0;
 
-	mutex_lock(&unlikely_tracing_mutex);
-	unlikely_tracer = tr;
+	mutex_lock(&branch_tracing_mutex);
+	branch_tracer = tr;
 	/*
 	 * Must be seen before enabling. The reader is a condition
 	 * where we do not need a matching rmb()
 	 */
 	smp_wmb();
-	unlikely_tracing_enabled++;
-	mutex_unlock(&unlikely_tracing_mutex);
+	branch_tracing_enabled++;
+	mutex_unlock(&branch_tracing_mutex);
 
 	return ret;
 }
 
-void disable_unlikely_tracing(void)
+void disable_branch_tracing(void)
 {
-	mutex_lock(&unlikely_tracing_mutex);
+	mutex_lock(&branch_tracing_mutex);
 
-	if (!unlikely_tracing_enabled)
+	if (!branch_tracing_enabled)
 		goto out_unlock;
 
-	unlikely_tracing_enabled--;
+	branch_tracing_enabled--;
 
  out_unlock:
-	mutex_unlock(&unlikely_tracing_mutex);
+	mutex_unlock(&branch_tracing_mutex);
 }
 #else
 static inline
-void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
+void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 {
 }
 #endif /* CONFIG_BRANCH_TRACER */
 
-void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect)
+void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
 {
 	/*
 	 * I would love to have a trace point here instead, but the
@@ -148,7 +148,7 @@ static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	struct ftrace_pointer *f = m->private;
-	struct ftrace_likely_data *p = v;
+	struct ftrace_branch_data *p = v;
 
 	(*pos)++;
 
@@ -180,7 +180,7 @@ static void t_stop(struct seq_file *m, void *p)
 
 static int t_show(struct seq_file *m, void *v)
 {
-	struct ftrace_likely_data *p = v;
+	struct ftrace_branch_data *p = v;
 	const char *f;
 	unsigned long percent;
 
@@ -252,7 +252,7 @@ static struct ftrace_pointer ftrace_unlikely_pos = {
 	.stop = __stop_unlikely_profile,
 };
 
-static __init int ftrace_unlikely_init(void)
+static __init int ftrace_branch_init(void)
 {
 	struct dentry *d_tracer;
 	struct dentry *entry;
@@ -275,4 +275,4 @@ static __init int ftrace_unlikely_init(void)
 	return 0;
 }
 
-device_initcall(ftrace_unlikely_init);
+device_initcall(ftrace_branch_init);