Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/events/block.h        6
-rw-r--r--  include/trace/events/ext4.h        54
-rw-r--r--  include/trace/events/irq.h          6
-rw-r--r--  include/trace/events/jbd2.h         6
-rw-r--r--  include/trace/events/kmem.h         6
-rw-r--r--  include/trace/events/lockdep.h      6
-rw-r--r--  include/trace/events/sched.h        6
-rw-r--r--  include/trace/events/skb.h          6
-rw-r--r--  include/trace/events/workqueue.h    6
-rw-r--r--  include/trace/ftrace.h            172
10 files changed, 177 insertions(+), 97 deletions(-)
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index d6b05f42dd44..9a74b468a229 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -1,3 +1,6 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM block
+
 #if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_BLOCK_H
 
@@ -5,9 +8,6 @@
 #include <linux/blkdev.h>
 #include <linux/tracepoint.h>
 
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM block
-
 TRACE_EVENT(block_rq_abort,
 
 	TP_PROTO(struct request_queue *q, struct request *rq),
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index acf4cc9cd36d..7d8b5bc74185 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -1,9 +1,9 @@
-#if !defined(_TRACE_EXT4_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_EXT4_H
-
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM ext4
 
+#if !defined(_TRACE_EXT4_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EXT4_H
+
 #include <linux/writeback.h>
 #include "../../../fs/ext4/ext4.h"
 #include "../../../fs/ext4/mballoc.h"
@@ -34,7 +34,8 @@ TRACE_EVENT(ext4_free_inode,
 
 	TP_printk("dev %s ino %lu mode %d uid %u gid %u blocks %llu",
 		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->mode,
-		  __entry->uid, __entry->gid, __entry->blocks)
+		  __entry->uid, __entry->gid,
+		  (unsigned long long) __entry->blocks)
 );
 
 TRACE_EVENT(ext4_request_inode,
@@ -189,7 +190,7 @@ TRACE_EVENT(ext4_journalled_write_end,
 		  __entry->copied)
 );
 
-TRACE_EVENT(ext4_da_writepage,
+TRACE_EVENT(ext4_writepage,
 	TP_PROTO(struct inode *inode, struct page *page),
 
 	TP_ARGS(inode, page),
@@ -341,49 +342,6 @@ TRACE_EVENT(ext4_da_write_end,
 		  __entry->copied)
 );
 
-TRACE_EVENT(ext4_normal_writepage,
-	TP_PROTO(struct inode *inode, struct page *page),
-
-	TP_ARGS(inode, page),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	pgoff_t, index			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->index	= page->index;
-	),
-
-	TP_printk("dev %s ino %lu page_index %lu",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->index)
-);
-
-TRACE_EVENT(ext4_journalled_writepage,
-	TP_PROTO(struct inode *inode, struct page *page),
-
-	TP_ARGS(inode, page),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	pgoff_t, index			)
-
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->index	= page->index;
-	),
-
-	TP_printk("dev %s ino %lu page_index %lu",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->index)
-);
-
 TRACE_EVENT(ext4_discard_blocks,
 	TP_PROTO(struct super_block *sb, unsigned long long blk,
 		 unsigned long long count),
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index b0c7ede55eb1..1cb0c3aa11e6 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -1,12 +1,12 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM irq
+
 #if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_IRQ_H
 
 #include <linux/tracepoint.h>
 #include <linux/interrupt.h>
 
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM irq
-
 #define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
 #define show_softirq_name(val)				\
 	__print_symbolic(val,				\
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index 845b0b4b48fd..10813fa0c8d0 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -1,12 +1,12 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM jbd2
+
 #if !defined(_TRACE_JBD2_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_JBD2_H
 
 #include <linux/jbd2.h>
 #include <linux/tracepoint.h>
 
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM jbd2
-
 TRACE_EVENT(jbd2_checkpoint,
 
 	TP_PROTO(journal_t *journal, int result),
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 9baba50d6512..1493c541f9c4 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -1,12 +1,12 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kmem
+
 #if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_KMEM_H
 
 #include <linux/types.h>
 #include <linux/tracepoint.h>
 
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM kmem
-
 /*
  * The order of these masks is important. Matching masks will be seen
  * first and the left over flags will end up showing by themselves.
diff --git a/include/trace/events/lockdep.h b/include/trace/events/lockdep.h
index 0e956c9dfd7e..bcf1d209a00d 100644
--- a/include/trace/events/lockdep.h
+++ b/include/trace/events/lockdep.h
@@ -1,12 +1,12 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM lockdep
+
 #if !defined(_TRACE_LOCKDEP_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_LOCKDEP_H
 
 #include <linux/lockdep.h>
 #include <linux/tracepoint.h>
 
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM lockdep
-
 #ifdef CONFIG_LOCKDEP
 
 TRACE_EVENT(lock_acquire,
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 24ab5bcff7b2..8949bb7eb082 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1,12 +1,12 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sched
+
 #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_SCHED_H
 
 #include <linux/sched.h>
 #include <linux/tracepoint.h>
 
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM sched
-
 /*
  * Tracepoint for calling kthread_stop, performed to end a kthread:
  */
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 1e8fabb57c06..e499863b9669 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -1,12 +1,12 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM skb
+
 #if !defined(_TRACE_SKB_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_SKB_H
 
 #include <linux/skbuff.h>
 #include <linux/tracepoint.h>
 
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM skb
-
 /*
  * Tracepoint for free an sk_buff:
  */
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index 035f1bff288e..fcfd9a1e4b96 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -1,3 +1,6 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM workqueue
+
 #if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_WORKQUEUE_H
 
@@ -5,9 +8,6 @@
 #include <linux/sched.h>
 #include <linux/tracepoint.h>
 
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM workqueue
-
 TRACE_EVENT(workqueue_insertion,
 
 	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 1867553c61e5..7fb16d90e7b1 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -144,6 +144,9 @@
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
 
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print)	\
 static int							\
@@ -345,6 +348,56 @@ static inline int ftrace_get_offsets_##call( \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+#ifdef CONFIG_EVENT_PROFILE
+
+/*
+ * Generate the functions needed for tracepoint perf_counter support.
+ *
+ * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
+ *
+ * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * {
+ * 	int ret = 0;
+ *
+ * 	if (!atomic_inc_return(&event_call->profile_count))
+ * 		ret = register_trace_<call>(ftrace_profile_<call>);
+ *
+ * 	return ret;
+ * }
+ *
+ * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * {
+ * 	if (atomic_add_negative(-1, &event->call->profile_count))
+ * 		unregister_trace_<call>(ftrace_profile_<call>);
+ * }
+ *
+ */
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
+									\
+static void ftrace_profile_##call(proto);				\
+									\
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{									\
+	int ret = 0;							\
+									\
+	if (!atomic_inc_return(&event_call->profile_count))		\
+		ret = register_trace_##call(ftrace_profile_##call);	\
+									\
+	return ret;							\
+}									\
+									\
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{									\
+	if (atomic_add_negative(-1, &event_call->profile_count))	\
+		unregister_trace_##call(ftrace_profile_##call);		\
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#endif
+
 /*
  * Stage 4 of the trace events.
  *
@@ -447,28 +500,6 @@ static inline int ftrace_get_offsets_##call( \
 #define TP_FMT(fmt, args...)	fmt "\n", ##args
 
 #ifdef CONFIG_EVENT_PROFILE
-#define _TRACE_PROFILE(call, proto, args)				\
-static void ftrace_profile_##call(proto)				\
-{									\
-	extern void perf_tpcounter_event(int);				\
-	perf_tpcounter_event(event_##call.id);				\
-}									\
-									\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{									\
-	int ret = 0;							\
-									\
-	if (!atomic_inc_return(&event_call->profile_count))		\
-		ret = register_trace_##call(ftrace_profile_##call);	\
-									\
-	return ret;							\
-}									\
-									\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{									\
-	if (atomic_add_negative(-1, &event_call->profile_count))	\
-		unregister_trace_##call(ftrace_profile_##call);		\
-}
 
 #define _TRACE_PROFILE_INIT(call)					\
 	.profile_count = ATOMIC_INIT(-1),				\
@@ -476,7 +507,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 	.profile_disable = ftrace_profile_disable_##call,
 
 #else
-#define _TRACE_PROFILE(call, proto, args)
 #define _TRACE_PROFILE_INIT(call)
 #endif
 
@@ -502,7 +532,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
-_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))			\
 									\
 static struct ftrace_event_call event_##call;				\
 									\
@@ -586,6 +615,99 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#undef _TRACE_PROFILE
+/*
+ * Define the insertion callback to profile events
+ *
+ * The job is very similar to ftrace_raw_event_<call> except that we don't
+ * insert in the ring buffer but in a perf counter.
+ *
+ * static void ftrace_profile_<call>(proto)
+ * {
+ *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ *	struct ftrace_event_call *event_call = &event_<call>;
+ *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
+ *	struct ftrace_raw_##call *entry;
+ *	u64 __addr = 0, __count = 1;
+ *	unsigned long irq_flags;
+ *	int __entry_size;
+ *	int __data_size;
+ *	int pc;
+ *
+ *	local_save_flags(irq_flags);
+ *	pc = preempt_count();
+ *
+ *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *	__entry_size = __data_size + sizeof(*entry);
+ *
+ *	do {
+ *		char raw_data[__entry_size]; <- allocate our sample in the stack
+ *		struct trace_entry *ent;
+ *
+ *		entry = (struct ftrace_raw_<call> *)raw_data;
+ *		ent = &entry->ent;
+ *		tracing_generic_entry_update(ent, irq_flags, pc);
+ *		ent->type = event_call->id;
+ *
+ *		<tstruct> <- do some jobs with dynamic arrays
+ *
+ *		<assign>  <- affect our values
+ *
+ *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
+ *			     __entry_size);  <- submit them to perf counter
+ *	} while (0);
+ *
+ * }
+ */
+
+#ifdef CONFIG_EVENT_PROFILE
+
+#undef __perf_addr
+#define __perf_addr(a) __addr = (a)
+
+#undef __perf_count
+#define __perf_count(c) __count = (c)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
+static void ftrace_profile_##call(proto)				\
+{									\
+	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+	struct ftrace_event_call *event_call = &event_##call;		\
+	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
+	struct ftrace_raw_##call *entry;				\
+	u64 __addr = 0, __count = 1;					\
+	unsigned long irq_flags;					\
+	int __entry_size;						\
+	int __data_size;						\
+	int pc;								\
+									\
+	local_save_flags(irq_flags);					\
+	pc = preempt_count();						\
+									\
+	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
+	__entry_size = ALIGN(__data_size + sizeof(*entry), sizeof(u64));\
+									\
+	do {								\
+		char raw_data[__entry_size];				\
+		struct trace_entry *ent;				\
+									\
+		entry = (struct ftrace_raw_##call *)raw_data;		\
+		ent = &entry->ent;					\
+		tracing_generic_entry_update(ent, irq_flags, pc);	\
+		ent->type = event_call->id;				\
+									\
+		tstruct							\
+									\
+		{ assign; }						\
+									\
+		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+			     __entry_size);				\
+	} while (0);							\
+									\
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_EVENT_PROFILE */
+
 #undef _TRACE_PROFILE_INIT
 