aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace_irqsoff.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2008-05-12 15:20:51 -0400
committerThomas Gleixner <tglx@linutronix.de>2008-05-23 14:58:28 -0400
commite309b41dd65aa953f86765eeeecc941d8e1e8b8f (patch)
tree295d4ed6e2a766607f889a04b977ca27cc24929e /kernel/trace/trace_irqsoff.c
parentb53dde9d34f2df396540988ebc65c33400f57b04 (diff)
ftrace: remove notrace
now that we have a kbuild method for notrace, no need to pollute the C code with the annotations. Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/trace/trace_irqsoff.c')
-rw-r--r--  kernel/trace/trace_irqsoff.c | 40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 2ac0d09db6fb..7a4dc014b8ab 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -33,7 +33,7 @@ enum {
33static int trace_type __read_mostly; 33static int trace_type __read_mostly;
34 34
35#ifdef CONFIG_PREEMPT_TRACER 35#ifdef CONFIG_PREEMPT_TRACER
36static inline int notrace 36static inline int
37preempt_trace(void) 37preempt_trace(void)
38{ 38{
39 return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count()); 39 return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
@@ -43,7 +43,7 @@ preempt_trace(void)
43#endif 43#endif
44 44
45#ifdef CONFIG_IRQSOFF_TRACER 45#ifdef CONFIG_IRQSOFF_TRACER
46static inline int notrace 46static inline int
47irq_trace(void) 47irq_trace(void)
48{ 48{
49 return ((trace_type & TRACER_IRQS_OFF) && 49 return ((trace_type & TRACER_IRQS_OFF) &&
@@ -67,7 +67,7 @@ static __cacheline_aligned_in_smp unsigned long max_sequence;
67/* 67/*
68 * irqsoff uses its own tracer function to keep the overhead down: 68 * irqsoff uses its own tracer function to keep the overhead down:
69 */ 69 */
70static void notrace 70static void
71irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) 71irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
72{ 72{
73 struct trace_array *tr = irqsoff_trace; 73 struct trace_array *tr = irqsoff_trace;
@@ -109,7 +109,7 @@ static struct ftrace_ops trace_ops __read_mostly =
109/* 109/*
110 * Should this new latency be reported/recorded? 110 * Should this new latency be reported/recorded?
111 */ 111 */
112static int notrace report_latency(cycle_t delta) 112static int report_latency(cycle_t delta)
113{ 113{
114 if (tracing_thresh) { 114 if (tracing_thresh) {
115 if (delta < tracing_thresh) 115 if (delta < tracing_thresh)
@@ -121,7 +121,7 @@ static int notrace report_latency(cycle_t delta)
121 return 1; 121 return 1;
122} 122}
123 123
124static void notrace 124static void
125check_critical_timing(struct trace_array *tr, 125check_critical_timing(struct trace_array *tr,
126 struct trace_array_cpu *data, 126 struct trace_array_cpu *data,
127 unsigned long parent_ip, 127 unsigned long parent_ip,
@@ -191,7 +191,7 @@ out:
191 trace_function(tr, data, CALLER_ADDR0, parent_ip, flags); 191 trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
192} 192}
193 193
194static inline void notrace 194static inline void
195start_critical_timing(unsigned long ip, unsigned long parent_ip) 195start_critical_timing(unsigned long ip, unsigned long parent_ip)
196{ 196{
197 int cpu; 197 int cpu;
@@ -228,7 +228,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
228 atomic_dec(&data->disabled); 228 atomic_dec(&data->disabled);
229} 229}
230 230
231static inline void notrace 231static inline void
232stop_critical_timing(unsigned long ip, unsigned long parent_ip) 232stop_critical_timing(unsigned long ip, unsigned long parent_ip)
233{ 233{
234 int cpu; 234 int cpu;
@@ -261,13 +261,13 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
261} 261}
262 262
263/* start and stop critical timings used to for stoppage (in idle) */ 263/* start and stop critical timings used to for stoppage (in idle) */
264void notrace start_critical_timings(void) 264void start_critical_timings(void)
265{ 265{
266 if (preempt_trace() || irq_trace()) 266 if (preempt_trace() || irq_trace())
267 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); 267 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
268} 268}
269 269
270void notrace stop_critical_timings(void) 270void stop_critical_timings(void)
271{ 271{
272 if (preempt_trace() || irq_trace()) 272 if (preempt_trace() || irq_trace())
273 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); 273 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
@@ -275,13 +275,13 @@ void notrace stop_critical_timings(void)
275 275
276#ifdef CONFIG_IRQSOFF_TRACER 276#ifdef CONFIG_IRQSOFF_TRACER
277#ifdef CONFIG_PROVE_LOCKING 277#ifdef CONFIG_PROVE_LOCKING
278void notrace time_hardirqs_on(unsigned long a0, unsigned long a1) 278void time_hardirqs_on(unsigned long a0, unsigned long a1)
279{ 279{
280 if (!preempt_trace() && irq_trace()) 280 if (!preempt_trace() && irq_trace())
281 stop_critical_timing(a0, a1); 281 stop_critical_timing(a0, a1);
282} 282}
283 283
284void notrace time_hardirqs_off(unsigned long a0, unsigned long a1) 284void time_hardirqs_off(unsigned long a0, unsigned long a1)
285{ 285{
286 if (!preempt_trace() && irq_trace()) 286 if (!preempt_trace() && irq_trace())
287 start_critical_timing(a0, a1); 287 start_critical_timing(a0, a1);
@@ -309,35 +309,35 @@ void trace_softirqs_off(unsigned long ip)
309{ 309{
310} 310}
311 311
312inline notrace void print_irqtrace_events(struct task_struct *curr) 312inline void print_irqtrace_events(struct task_struct *curr)
313{ 313{
314} 314}
315 315
316/* 316/*
317 * We are only interested in hardirq on/off events: 317 * We are only interested in hardirq on/off events:
318 */ 318 */
319void notrace trace_hardirqs_on(void) 319void trace_hardirqs_on(void)
320{ 320{
321 if (!preempt_trace() && irq_trace()) 321 if (!preempt_trace() && irq_trace())
322 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); 322 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
323} 323}
324EXPORT_SYMBOL(trace_hardirqs_on); 324EXPORT_SYMBOL(trace_hardirqs_on);
325 325
326void notrace trace_hardirqs_off(void) 326void trace_hardirqs_off(void)
327{ 327{
328 if (!preempt_trace() && irq_trace()) 328 if (!preempt_trace() && irq_trace())
329 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); 329 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
330} 330}
331EXPORT_SYMBOL(trace_hardirqs_off); 331EXPORT_SYMBOL(trace_hardirqs_off);
332 332
333void notrace trace_hardirqs_on_caller(unsigned long caller_addr) 333void trace_hardirqs_on_caller(unsigned long caller_addr)
334{ 334{
335 if (!preempt_trace() && irq_trace()) 335 if (!preempt_trace() && irq_trace())
336 stop_critical_timing(CALLER_ADDR0, caller_addr); 336 stop_critical_timing(CALLER_ADDR0, caller_addr);
337} 337}
338EXPORT_SYMBOL(trace_hardirqs_on_caller); 338EXPORT_SYMBOL(trace_hardirqs_on_caller);
339 339
340void notrace trace_hardirqs_off_caller(unsigned long caller_addr) 340void trace_hardirqs_off_caller(unsigned long caller_addr)
341{ 341{
342 if (!preempt_trace() && irq_trace()) 342 if (!preempt_trace() && irq_trace())
343 start_critical_timing(CALLER_ADDR0, caller_addr); 343 start_critical_timing(CALLER_ADDR0, caller_addr);
@@ -348,12 +348,12 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
348#endif /* CONFIG_IRQSOFF_TRACER */ 348#endif /* CONFIG_IRQSOFF_TRACER */
349 349
350#ifdef CONFIG_PREEMPT_TRACER 350#ifdef CONFIG_PREEMPT_TRACER
351void notrace trace_preempt_on(unsigned long a0, unsigned long a1) 351void trace_preempt_on(unsigned long a0, unsigned long a1)
352{ 352{
353 stop_critical_timing(a0, a1); 353 stop_critical_timing(a0, a1);
354} 354}
355 355
356void notrace trace_preempt_off(unsigned long a0, unsigned long a1) 356void trace_preempt_off(unsigned long a0, unsigned long a1)
357{ 357{
358 start_critical_timing(a0, a1); 358 start_critical_timing(a0, a1);
359} 359}
@@ -395,14 +395,14 @@ static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
395 stop_irqsoff_tracer(tr); 395 stop_irqsoff_tracer(tr);
396} 396}
397 397
398static void notrace irqsoff_tracer_open(struct trace_iterator *iter) 398static void irqsoff_tracer_open(struct trace_iterator *iter)
399{ 399{
400 /* stop the trace while dumping */ 400 /* stop the trace while dumping */
401 if (iter->tr->ctrl) 401 if (iter->tr->ctrl)
402 stop_irqsoff_tracer(iter->tr); 402 stop_irqsoff_tracer(iter->tr);
403} 403}
404 404
405static void notrace irqsoff_tracer_close(struct trace_iterator *iter) 405static void irqsoff_tracer_close(struct trace_iterator *iter)
406{ 406{
407 if (iter->tr->ctrl) 407 if (iter->tr->ctrl)
408 start_irqsoff_tracer(iter->tr); 408 start_irqsoff_tracer(iter->tr);