diff options
author | Ingo Molnar <mingo@elte.hu> | 2008-05-12 15:20:51 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2008-05-23 14:58:28 -0400 |
commit | e309b41dd65aa953f86765eeeecc941d8e1e8b8f (patch) | |
tree | 295d4ed6e2a766607f889a04b977ca27cc24929e /kernel/trace/trace_irqsoff.c | |
parent | b53dde9d34f2df396540988ebc65c33400f57b04 (diff) |
ftrace: remove notrace
Now that we have a kbuild method for notrace, there is no need to pollute the
C code with the annotations.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/trace/trace_irqsoff.c')
-rw-r--r-- | kernel/trace/trace_irqsoff.c | 40 |
1 file changed, 20 insertions, 20 deletions
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 2ac0d09db6fb..7a4dc014b8ab 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -33,7 +33,7 @@ enum { | |||
33 | static int trace_type __read_mostly; | 33 | static int trace_type __read_mostly; |
34 | 34 | ||
35 | #ifdef CONFIG_PREEMPT_TRACER | 35 | #ifdef CONFIG_PREEMPT_TRACER |
36 | static inline int notrace | 36 | static inline int |
37 | preempt_trace(void) | 37 | preempt_trace(void) |
38 | { | 38 | { |
39 | return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count()); | 39 | return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count()); |
@@ -43,7 +43,7 @@ preempt_trace(void) | |||
43 | #endif | 43 | #endif |
44 | 44 | ||
45 | #ifdef CONFIG_IRQSOFF_TRACER | 45 | #ifdef CONFIG_IRQSOFF_TRACER |
46 | static inline int notrace | 46 | static inline int |
47 | irq_trace(void) | 47 | irq_trace(void) |
48 | { | 48 | { |
49 | return ((trace_type & TRACER_IRQS_OFF) && | 49 | return ((trace_type & TRACER_IRQS_OFF) && |
@@ -67,7 +67,7 @@ static __cacheline_aligned_in_smp unsigned long max_sequence; | |||
67 | /* | 67 | /* |
68 | * irqsoff uses its own tracer function to keep the overhead down: | 68 | * irqsoff uses its own tracer function to keep the overhead down: |
69 | */ | 69 | */ |
70 | static void notrace | 70 | static void |
71 | irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) | 71 | irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) |
72 | { | 72 | { |
73 | struct trace_array *tr = irqsoff_trace; | 73 | struct trace_array *tr = irqsoff_trace; |
@@ -109,7 +109,7 @@ static struct ftrace_ops trace_ops __read_mostly = | |||
109 | /* | 109 | /* |
110 | * Should this new latency be reported/recorded? | 110 | * Should this new latency be reported/recorded? |
111 | */ | 111 | */ |
112 | static int notrace report_latency(cycle_t delta) | 112 | static int report_latency(cycle_t delta) |
113 | { | 113 | { |
114 | if (tracing_thresh) { | 114 | if (tracing_thresh) { |
115 | if (delta < tracing_thresh) | 115 | if (delta < tracing_thresh) |
@@ -121,7 +121,7 @@ static int notrace report_latency(cycle_t delta) | |||
121 | return 1; | 121 | return 1; |
122 | } | 122 | } |
123 | 123 | ||
124 | static void notrace | 124 | static void |
125 | check_critical_timing(struct trace_array *tr, | 125 | check_critical_timing(struct trace_array *tr, |
126 | struct trace_array_cpu *data, | 126 | struct trace_array_cpu *data, |
127 | unsigned long parent_ip, | 127 | unsigned long parent_ip, |
@@ -191,7 +191,7 @@ out: | |||
191 | trace_function(tr, data, CALLER_ADDR0, parent_ip, flags); | 191 | trace_function(tr, data, CALLER_ADDR0, parent_ip, flags); |
192 | } | 192 | } |
193 | 193 | ||
194 | static inline void notrace | 194 | static inline void |
195 | start_critical_timing(unsigned long ip, unsigned long parent_ip) | 195 | start_critical_timing(unsigned long ip, unsigned long parent_ip) |
196 | { | 196 | { |
197 | int cpu; | 197 | int cpu; |
@@ -228,7 +228,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip) | |||
228 | atomic_dec(&data->disabled); | 228 | atomic_dec(&data->disabled); |
229 | } | 229 | } |
230 | 230 | ||
231 | static inline void notrace | 231 | static inline void |
232 | stop_critical_timing(unsigned long ip, unsigned long parent_ip) | 232 | stop_critical_timing(unsigned long ip, unsigned long parent_ip) |
233 | { | 233 | { |
234 | int cpu; | 234 | int cpu; |
@@ -261,13 +261,13 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip) | |||
261 | } | 261 | } |
262 | 262 | ||
263 | /* start and stop critical timings used to for stoppage (in idle) */ | 263 | /* start and stop critical timings used to for stoppage (in idle) */ |
264 | void notrace start_critical_timings(void) | 264 | void start_critical_timings(void) |
265 | { | 265 | { |
266 | if (preempt_trace() || irq_trace()) | 266 | if (preempt_trace() || irq_trace()) |
267 | start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); | 267 | start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); |
268 | } | 268 | } |
269 | 269 | ||
270 | void notrace stop_critical_timings(void) | 270 | void stop_critical_timings(void) |
271 | { | 271 | { |
272 | if (preempt_trace() || irq_trace()) | 272 | if (preempt_trace() || irq_trace()) |
273 | stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); | 273 | stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); |
@@ -275,13 +275,13 @@ void notrace stop_critical_timings(void) | |||
275 | 275 | ||
276 | #ifdef CONFIG_IRQSOFF_TRACER | 276 | #ifdef CONFIG_IRQSOFF_TRACER |
277 | #ifdef CONFIG_PROVE_LOCKING | 277 | #ifdef CONFIG_PROVE_LOCKING |
278 | void notrace time_hardirqs_on(unsigned long a0, unsigned long a1) | 278 | void time_hardirqs_on(unsigned long a0, unsigned long a1) |
279 | { | 279 | { |
280 | if (!preempt_trace() && irq_trace()) | 280 | if (!preempt_trace() && irq_trace()) |
281 | stop_critical_timing(a0, a1); | 281 | stop_critical_timing(a0, a1); |
282 | } | 282 | } |
283 | 283 | ||
284 | void notrace time_hardirqs_off(unsigned long a0, unsigned long a1) | 284 | void time_hardirqs_off(unsigned long a0, unsigned long a1) |
285 | { | 285 | { |
286 | if (!preempt_trace() && irq_trace()) | 286 | if (!preempt_trace() && irq_trace()) |
287 | start_critical_timing(a0, a1); | 287 | start_critical_timing(a0, a1); |
@@ -309,35 +309,35 @@ void trace_softirqs_off(unsigned long ip) | |||
309 | { | 309 | { |
310 | } | 310 | } |
311 | 311 | ||
312 | inline notrace void print_irqtrace_events(struct task_struct *curr) | 312 | inline void print_irqtrace_events(struct task_struct *curr) |
313 | { | 313 | { |
314 | } | 314 | } |
315 | 315 | ||
316 | /* | 316 | /* |
317 | * We are only interested in hardirq on/off events: | 317 | * We are only interested in hardirq on/off events: |
318 | */ | 318 | */ |
319 | void notrace trace_hardirqs_on(void) | 319 | void trace_hardirqs_on(void) |
320 | { | 320 | { |
321 | if (!preempt_trace() && irq_trace()) | 321 | if (!preempt_trace() && irq_trace()) |
322 | stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); | 322 | stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); |
323 | } | 323 | } |
324 | EXPORT_SYMBOL(trace_hardirqs_on); | 324 | EXPORT_SYMBOL(trace_hardirqs_on); |
325 | 325 | ||
326 | void notrace trace_hardirqs_off(void) | 326 | void trace_hardirqs_off(void) |
327 | { | 327 | { |
328 | if (!preempt_trace() && irq_trace()) | 328 | if (!preempt_trace() && irq_trace()) |
329 | start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); | 329 | start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); |
330 | } | 330 | } |
331 | EXPORT_SYMBOL(trace_hardirqs_off); | 331 | EXPORT_SYMBOL(trace_hardirqs_off); |
332 | 332 | ||
333 | void notrace trace_hardirqs_on_caller(unsigned long caller_addr) | 333 | void trace_hardirqs_on_caller(unsigned long caller_addr) |
334 | { | 334 | { |
335 | if (!preempt_trace() && irq_trace()) | 335 | if (!preempt_trace() && irq_trace()) |
336 | stop_critical_timing(CALLER_ADDR0, caller_addr); | 336 | stop_critical_timing(CALLER_ADDR0, caller_addr); |
337 | } | 337 | } |
338 | EXPORT_SYMBOL(trace_hardirqs_on_caller); | 338 | EXPORT_SYMBOL(trace_hardirqs_on_caller); |
339 | 339 | ||
340 | void notrace trace_hardirqs_off_caller(unsigned long caller_addr) | 340 | void trace_hardirqs_off_caller(unsigned long caller_addr) |
341 | { | 341 | { |
342 | if (!preempt_trace() && irq_trace()) | 342 | if (!preempt_trace() && irq_trace()) |
343 | start_critical_timing(CALLER_ADDR0, caller_addr); | 343 | start_critical_timing(CALLER_ADDR0, caller_addr); |
@@ -348,12 +348,12 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller); | |||
348 | #endif /* CONFIG_IRQSOFF_TRACER */ | 348 | #endif /* CONFIG_IRQSOFF_TRACER */ |
349 | 349 | ||
350 | #ifdef CONFIG_PREEMPT_TRACER | 350 | #ifdef CONFIG_PREEMPT_TRACER |
351 | void notrace trace_preempt_on(unsigned long a0, unsigned long a1) | 351 | void trace_preempt_on(unsigned long a0, unsigned long a1) |
352 | { | 352 | { |
353 | stop_critical_timing(a0, a1); | 353 | stop_critical_timing(a0, a1); |
354 | } | 354 | } |
355 | 355 | ||
356 | void notrace trace_preempt_off(unsigned long a0, unsigned long a1) | 356 | void trace_preempt_off(unsigned long a0, unsigned long a1) |
357 | { | 357 | { |
358 | start_critical_timing(a0, a1); | 358 | start_critical_timing(a0, a1); |
359 | } | 359 | } |
@@ -395,14 +395,14 @@ static void irqsoff_tracer_ctrl_update(struct trace_array *tr) | |||
395 | stop_irqsoff_tracer(tr); | 395 | stop_irqsoff_tracer(tr); |
396 | } | 396 | } |
397 | 397 | ||
398 | static void notrace irqsoff_tracer_open(struct trace_iterator *iter) | 398 | static void irqsoff_tracer_open(struct trace_iterator *iter) |
399 | { | 399 | { |
400 | /* stop the trace while dumping */ | 400 | /* stop the trace while dumping */ |
401 | if (iter->tr->ctrl) | 401 | if (iter->tr->ctrl) |
402 | stop_irqsoff_tracer(iter->tr); | 402 | stop_irqsoff_tracer(iter->tr); |
403 | } | 403 | } |
404 | 404 | ||
405 | static void notrace irqsoff_tracer_close(struct trace_iterator *iter) | 405 | static void irqsoff_tracer_close(struct trace_iterator *iter) |
406 | { | 406 | { |
407 | if (iter->tr->ctrl) | 407 | if (iter->tr->ctrl) |
408 | start_irqsoff_tracer(iter->tr); | 408 | start_irqsoff_tracer(iter->tr); |