 arch/x86/kernel/Makefile_32     |   3 +
 arch/x86/kernel/ft_event.c      | 104 ++++++++++
 include/asm-x86/feather_trace.h |  93 ++++++++
 3 files changed, 200 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32
index a7bc93c27662..5f87f32ada6f 100644
--- a/arch/x86/kernel/Makefile_32
+++ b/arch/x86/kernel/Makefile_32
@@ -49,6 +49,9 @@ obj-y += pcspeaker.o
 
 obj-$(CONFIG_SCx200) += scx200_32.o
 
+obj-$(CONFIG_FEATHER_TRACE) += ft_event.o
+
+
 # vsyscall_32.o contains the vsyscall DSO images as __initdata.
 # We must build both images before we can assemble it.
 # Note: kbuild does not track this dependency due to usage of .incbin
diff --git a/arch/x86/kernel/ft_event.c b/arch/x86/kernel/ft_event.c
new file mode 100644
index 000000000000..b1d80c52d793
--- /dev/null
+++ b/arch/x86/kernel/ft_event.c
@@ -0,0 +1,104 @@
+#include <linux/types.h>
+
+#include <litmus/feather_trace.h>
+
+/* the feather trace management functions assume
+ * exclusive access to the event table
+ */
+
+
+#define BYTE_JUMP      0xeb
+#define BYTE_JUMP_LEN  0x02
+
+/* for each event, there is an entry in the event table */
+struct trace_event {
+	long id;
+	long count;
+	long start_addr;
+	long end_addr;
+};
+
+extern struct trace_event __start___event_table[];
+extern struct trace_event __stop___event_table[];
+
+int ft_enable_event(unsigned long id)
+{
+	struct trace_event* te = __start___event_table;
+	int count = 0;
+	char* delta;
+	unsigned char* instr;
+
+	while (te < __stop___event_table) {
+		if (te->id == id && ++te->count == 1) {
+			instr = (unsigned char*) te->start_addr;
+			/* make sure we don't clobber something wrong */
+			if (*instr == BYTE_JUMP) {
+				delta  = (((unsigned char*) te->start_addr) + 1);
+				*delta = 0;
+			}
+		}
+		if (te->id == id)
+			count++;
+		te++;
+	}
+	return count;
+}
+
+int ft_disable_event(unsigned long id)
+{
+	struct trace_event* te = __start___event_table;
+	int count = 0;
+	char* delta;
+	unsigned char* instr;
+
+	while (te < __stop___event_table) {
+		if (te->id == id && --te->count == 0) {
+			instr = (unsigned char*) te->start_addr;
+			if (*instr == BYTE_JUMP) {
+				delta  = (((unsigned char*) te->start_addr) + 1);
+				*delta = te->end_addr - te->start_addr -
+					BYTE_JUMP_LEN;
+			}
+		}
+		if (te->id == id)
+			count++;
+		te++;
+	}
+	return count;
+}
+
+int ft_disable_all_events(void)
+{
+	struct trace_event* te = __start___event_table;
+	int count = 0;
+	char* delta;
+	unsigned char* instr;
+
+	while (te < __stop___event_table) {
+		if (te->count) {
+			instr = (unsigned char*) te->start_addr;
+			if (*instr == BYTE_JUMP) {
+				delta  = (((unsigned char*) te->start_addr)
+					  + 1);
+				*delta = te->end_addr - te->start_addr -
+					BYTE_JUMP_LEN;
+				te->count = 0;
+				count++;
+			}
+		}
+		te++;
+	}
+	return count;
+}
+
+int ft_is_event_enabled(unsigned long id)
+{
+	struct trace_event* te = __start___event_table;
+
+	while (te < __stop___event_table) {
+		if (te->id == id)
+			return te->count;
+		te++;
+	}
+	return 0;
+}
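
For context (an editor's illustration, not part of the patch): the ft_event*() macros added to feather_trace.h below emit a two-byte short jump (opcode 0xeb) over the call to the callback and record the trigger's start and end addresses in the __event_table section; the linker-provided __start___event_table/__stop___event_table symbols let ft_event.c walk those entries. Enabling an event rewrites the jump's 8-bit displacement to 0 so execution falls through to the call, and disabling restores a displacement that skips to the end label. A minimal usage sketch, assuming a made-up event ID and that the caller provides the serialization the comment in the file requires:

/* Hypothetical sketch, not part of this patch.  The event ID is arbitrary
 * and only has to match the ID used at the ft_event*() instrumentation site. */
#define EXAMPLE_EVENT_ID 101

static void example_set_tracing(int on)
{
	if (on)
		/* first enabler patches the jmp displacement to 0 (fall through to the call) */
		ft_enable_event(EXAMPLE_EVENT_ID);
	else
		/* last disabler restores the jump over the call */
		ft_disable_event(EXAMPLE_EVENT_ID);
}

Both functions return the number of matching table entries, so a caller can check that the event ID actually exists.
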
diff --git a/include/asm-x86/feather_trace.h b/include/asm-x86/feather_trace.h
index f60fbed07afb..253067eceeec 100644
--- a/include/asm-x86/feather_trace.h
+++ b/include/asm-x86/feather_trace.h
@@ -1,6 +1,97 @@
 #ifndef _ARCH_FEATHER_TRACE_H
 #define _ARCH_FEATHER_TRACE_H
 
+static inline int fetch_and_inc(int *val)
+{
+	int ret = 1;
+	__asm__ __volatile__("lock; xaddl %0, %1" : "+r" (ret), "+m" (*val) : : "memory" );
+	return ret;
+}
+
+static inline int fetch_and_dec(int *val)
+{
+	int ret = -1;
+	__asm__ __volatile__("lock; xaddl %0, %1" : "+r" (ret), "+m" (*val) : : "memory" );
+	return ret;
+}
+
+#define feather_callback __attribute__((regparm(0)))
+
+/* make the compiler reload any register that is not saved in
+ * a cdecl function call
+ */
+#define CLOBBER_LIST "memory", "cc", "eax", "ecx", "edx"
+
+#define ft_event(id, callback) \
+	__asm__ __volatile__( \
+	    "1: jmp 2f \n\t" \
+	    " call " #callback " \n\t" \
+	    ".section __event_table, \"aw\" \n\t" \
+	    ".long " #id ", 0, 1b, 2f \n\t" \
+	    ".previous \n\t" \
+	    "2: \n\t" \
+	: : : CLOBBER_LIST)
+
+#define ft_event0(id, callback) \
+	__asm__ __volatile__( \
+	    "1: jmp 2f \n\t" \
+	    " subl $4, %%esp \n\t" \
+	    " movl $" #id ", (%%esp) \n\t" \
+	    " call " #callback " \n\t" \
+	    " addl $4, %%esp \n\t" \
+	    ".section __event_table, \"aw\" \n\t" \
+	    ".long " #id ", 0, 1b, 2f \n\t" \
+	    ".previous \n\t" \
+	    "2: \n\t" \
+	: : : CLOBBER_LIST)
+
+#define ft_event1(id, callback, param) \
+	__asm__ __volatile__( \
+	    "1: jmp 2f \n\t" \
+	    " subl $8, %%esp \n\t" \
+	    " movl %0, 4(%%esp) \n\t" \
+	    " movl $" #id ", (%%esp) \n\t" \
+	    " call " #callback " \n\t" \
+	    " addl $8, %%esp \n\t" \
+	    ".section __event_table, \"aw\" \n\t" \
+	    ".long " #id ", 0, 1b, 2f \n\t" \
+	    ".previous \n\t" \
+	    "2: \n\t" \
+	: : "r" (param) : CLOBBER_LIST)
+
+#define ft_event2(id, callback, param, param2) \
+	__asm__ __volatile__( \
+	    "1: jmp 2f \n\t" \
+	    " subl $12, %%esp \n\t" \
+	    " movl %1, 8(%%esp) \n\t" \
+	    " movl %0, 4(%%esp) \n\t" \
+	    " movl $" #id ", (%%esp) \n\t" \
+	    " call " #callback " \n\t" \
+	    " addl $12, %%esp \n\t" \
+	    ".section __event_table, \"aw\" \n\t" \
+	    ".long " #id ", 0, 1b, 2f \n\t" \
+	    ".previous \n\t" \
+	    "2: \n\t" \
+	: : "r" (param), "r" (param2) : CLOBBER_LIST)
+
+
+#define ft_event3(id, callback, p, p2, p3) \
+	__asm__ __volatile__( \
+	    "1: jmp 2f \n\t" \
+	    " subl $16, %%esp \n\t" \
82 | " movl %1, 12(%%esp) \n\t" \ | ||
+	    " movl %1, 8(%%esp) \n\t" \
+	    " movl %0, 4(%%esp) \n\t" \
+	    " movl $" #id ", (%%esp) \n\t" \
+	    " call " #callback " \n\t" \
+	    " addl $16, %%esp \n\t" \
+	    ".section __event_table, \"aw\" \n\t" \
+	    ".long " #id ", 0, 1b, 2f \n\t" \
+	    ".previous \n\t" \
+	    "2: \n\t" \
+	: : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST)
+
+
 static inline unsigned long long ft_timestamp(void)
 {
 	unsigned long long ret;
@@ -8,4 +99,6 @@ static inline unsigned long long ft_timestamp(void)
 	return ret;
 }
 
+#define __ARCH_HAS_FEATHER_TRACE
+
 #endif
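
To illustrate how the two halves fit together (an editor's sketch, not part of the patch; all names are hypothetical): a callback is declared with feather_callback so it uses the plain cdecl/regparm(0) convention and picks its arguments up from the stack exactly as the ft_event1 stub pushes them, while the instrumentation site itself costs only the short jmp as long as its event stays disabled.

#include <litmus/feather_trace.h>

/* Hypothetical callback: the event ID arrives first, then the parameter
 * pushed by ft_event1. */
feather_callback void example_record(unsigned long id, unsigned long job)
{
	/* e.g., store ft_timestamp() together with id and job in a trace buffer */
}

static void example_hot_path(unsigned long job)
{
	/* Compiles to "jmp 2f" over the pushes and the call while event 42 is
	 * disabled; ft_enable_event(42) patches the jump displacement to 0 so
	 * example_record(42, job) is actually invoked. */
	ft_event1(42, example_record, job);
}

The regparm(0) attribute matters because the stub passes arguments on the stack by hand; without it, a kernel built with -mregparm=3 would expect them in registers.
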