 arch/x86/include/asm/feather_trace.h    |  13
 arch/x86/include/asm/feather_trace_32.h |  80
 arch/x86/kernel/Makefile                |   2
 arch/x86/kernel/ft_event.c              | 112
 include/litmus/feather_trace.h          |  25
 include/litmus/sched_trace.h            |   4
 include/litmus/trace.h                  |   4
 7 files changed, 220 insertions(+), 20 deletions(-)
diff --git a/arch/x86/include/asm/feather_trace.h b/arch/x86/include/asm/feather_trace.h
index f60fbed07afb..86a4303fce7a 100644
--- a/arch/x86/include/asm/feather_trace.h
+++ b/arch/x86/include/asm/feather_trace.h
@@ -1,11 +1,18 @@
 #ifndef _ARCH_FEATHER_TRACE_H
 #define _ARCH_FEATHER_TRACE_H
 
+#include <asm/msr.h>
+
 static inline unsigned long long ft_timestamp(void)
 {
-        unsigned long long ret;
-        __asm__ __volatile__("rdtsc" : "=A" (ret));
-        return ret;
+        return __native_read_tsc();
 }
 
+#ifdef CONFIG_X86_32
+#include "feather_trace_32.h"
+#else
+/* not ready for integration yet */
+//#include "feather_trace_64.h"
+#endif
+
 #endif
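
Note: the old ft_timestamp() read the TSC with an inline "rdtsc" using the "=A" constraint, which only names the EDX:EAX pair on 32-bit x86; delegating to __native_read_tsc() from <asm/msr.h> yields a correct 64-bit cycle count on both 32- and 64-bit builds. A minimal sketch of what such a helper boils down to (not the kernel's exact implementation):

static inline unsigned long long rdtsc_sketch(void)
{
        unsigned int lo, hi;

        /* RDTSC returns the low half in EAX and the high half in EDX;
         * combining them by hand avoids relying on the "=A" constraint. */
        __asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
        return ((unsigned long long) hi << 32) | lo;
}
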
diff --git a/arch/x86/include/asm/feather_trace_32.h b/arch/x86/include/asm/feather_trace_32.h
new file mode 100644
index 000000000000..192cd09b7850
--- /dev/null
+++ b/arch/x86/include/asm/feather_trace_32.h
@@ -0,0 +1,80 @@
+/* Do not directly include this file. Include feather_trace.h instead */
+
+#define feather_callback __attribute__((regparm(0)))
+
+/*
+ * make the compiler reload any register that is not saved in
+ * a cdecl function call
+ */
+#define CLOBBER_LIST "memory", "cc", "eax", "ecx", "edx"
+
+#define ft_event(id, callback)                          \
+        __asm__ __volatile__(                           \
+            "1: jmp 2f                            \n\t" \
+            " call " #callback "                  \n\t" \
+            ".section __event_table, \"aw\"       \n\t" \
+            ".long " #id ", 0, 1b, 2f             \n\t" \
+            ".previous                            \n\t" \
+            "2:                                   \n\t" \
+        : : : CLOBBER_LIST)
+
+#define ft_event0(id, callback)                         \
+        __asm__ __volatile__(                           \
+            "1: jmp 2f                            \n\t" \
+            " subl $4, %%esp                      \n\t" \
+            " movl $" #id ", (%%esp)              \n\t" \
+            " call " #callback "                  \n\t" \
+            " addl $4, %%esp                      \n\t" \
+            ".section __event_table, \"aw\"       \n\t" \
+            ".long " #id ", 0, 1b, 2f             \n\t" \
+            ".previous                            \n\t" \
+            "2:                                   \n\t" \
+        : : : CLOBBER_LIST)
+
+#define ft_event1(id, callback, param)                  \
+        __asm__ __volatile__(                           \
+            "1: jmp 2f                            \n\t" \
+            " subl $8, %%esp                      \n\t" \
+            " movl %0, 4(%%esp)                   \n\t" \
+            " movl $" #id ", (%%esp)              \n\t" \
+            " call " #callback "                  \n\t" \
+            " addl $8, %%esp                      \n\t" \
+            ".section __event_table, \"aw\"       \n\t" \
+            ".long " #id ", 0, 1b, 2f             \n\t" \
+            ".previous                            \n\t" \
+            "2:                                   \n\t" \
+        : : "r" (param) : CLOBBER_LIST)
+
+#define ft_event2(id, callback, param, param2)          \
+        __asm__ __volatile__(                           \
+            "1: jmp 2f                            \n\t" \
+            " subl $12, %%esp                     \n\t" \
+            " movl %1, 8(%%esp)                   \n\t" \
+            " movl %0, 4(%%esp)                   \n\t" \
+            " movl $" #id ", (%%esp)              \n\t" \
+            " call " #callback "                  \n\t" \
+            " addl $12, %%esp                     \n\t" \
+            ".section __event_table, \"aw\"       \n\t" \
+            ".long " #id ", 0, 1b, 2f             \n\t" \
+            ".previous                            \n\t" \
+            "2:                                   \n\t" \
+        : : "r" (param), "r" (param2) : CLOBBER_LIST)
+
+
+#define ft_event3(id, callback, p, p2, p3)              \
+        __asm__ __volatile__(                           \
+            "1: jmp 2f                            \n\t" \
+            " subl $16, %%esp                     \n\t" \
+            " movl %2, 12(%%esp)                  \n\t" \
+            " movl %1, 8(%%esp)                   \n\t" \
+            " movl %0, 4(%%esp)                   \n\t" \
+            " movl $" #id ", (%%esp)              \n\t" \
+            " call " #callback "                  \n\t" \
+            " addl $16, %%esp                     \n\t" \
+            ".section __event_table, \"aw\"       \n\t" \
+            ".long " #id ", 0, 1b, 2f             \n\t" \
+            ".previous                            \n\t" \
+            "2:                                   \n\t" \
+        : : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST)
+
+#define __ARCH_HAS_FEATHER_TRACE
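
Note: each ft_eventN() site compiles to a two-byte short jump over a cdecl call sequence and emits a record into the __event_table section holding the event id, an enable count, and the start/end labels of the patchable region; ft_enable_event() later rewrites the jump displacement so the call is actually executed. A hedged usage sketch (my_callback, probe_site, and the id 170 are illustrative and not part of the patch):

/* feather_callback forces regparm(0), so the id and the parameter arrive
 * on the stack exactly as the ft_event1() asm pushed them. */
feather_callback void my_callback(unsigned long id, unsigned long param)
{
        /* e.g. record ft_timestamp() and param into a trace buffer */
}

static void probe_site(void)
{
        /* The id is stringified straight into the asm, so it must be
         * something the assembler can resolve (here a literal). While the
         * event is disabled only the short jump runs; once enabled,
         * control falls through into the call to my_callback(170, 42). */
        ft_event1(170, my_callback, 42ul);
}
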
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index d8e5d0cdd678..a99b34d1b3b8 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -117,6 +117,8 @@ obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
 
 obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
 
+obj-$(CONFIG_FEATHER_TRACE) += ft_event.o
+
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/ft_event.c b/arch/x86/kernel/ft_event.c
new file mode 100644
index 000000000000..e07ee30dfff9
--- /dev/null
+++ b/arch/x86/kernel/ft_event.c
@@ -0,0 +1,112 @@
+#include <linux/types.h>
+
+#include <litmus/feather_trace.h>
+
+#ifdef __ARCH_HAS_FEATHER_TRACE
+/* the feather trace management functions assume
+ * exclusive access to the event table
+ */
+
+
+#define BYTE_JUMP      0xeb
+#define BYTE_JUMP_LEN  0x02
+
+/* for each event, there is an entry in the event table */
+struct trace_event {
+        long id;
+        long count;
+        long start_addr;
+        long end_addr;
+};
+
+extern struct trace_event __start___event_table[];
+extern struct trace_event __stop___event_table[];
+
+int ft_enable_event(unsigned long id)
+{
+        struct trace_event* te = __start___event_table;
+        int count = 0;
+        char* delta;
+        unsigned char* instr;
+
+        while (te < __stop___event_table) {
+                if (te->id == id && ++te->count == 1) {
+                        instr = (unsigned char*) te->start_addr;
+                        /* make sure we don't clobber something wrong */
+                        if (*instr == BYTE_JUMP) {
+                                delta = (((unsigned char*) te->start_addr) + 1);
+                                *delta = 0;
+                        }
+                }
+                if (te->id == id)
+                        count++;
+                te++;
+        }
+
+        printk(KERN_DEBUG "ft_enable_event: enabled %d events\n", count);
+        return count;
+}
+
+int ft_disable_event(unsigned long id)
+{
+        struct trace_event* te = __start___event_table;
+        int count = 0;
+        char* delta;
+        unsigned char* instr;
+
+        while (te < __stop___event_table) {
+                if (te->id == id && --te->count == 0) {
+                        instr = (unsigned char*) te->start_addr;
+                        if (*instr == BYTE_JUMP) {
+                                delta = (((unsigned char*) te->start_addr) + 1);
+                                *delta = te->end_addr - te->start_addr -
+                                        BYTE_JUMP_LEN;
+                        }
+                }
+                if (te->id == id)
+                        count++;
+                te++;
+        }
+
+        printk(KERN_DEBUG "ft_disable_event: disabled %d events\n", count);
+        return count;
+}
+
+int ft_disable_all_events(void)
+{
+        struct trace_event* te = __start___event_table;
+        int count = 0;
+        char* delta;
+        unsigned char* instr;
+
+        while (te < __stop___event_table) {
+                if (te->count) {
+                        instr = (unsigned char*) te->start_addr;
+                        if (*instr == BYTE_JUMP) {
+                                delta = (((unsigned char*) te->start_addr)
+                                         + 1);
+                                *delta = te->end_addr - te->start_addr -
+                                        BYTE_JUMP_LEN;
+                                te->count = 0;
+                                count++;
+                        }
+                }
+                te++;
+        }
+        return count;
+}
+
+int ft_is_event_enabled(unsigned long id)
+{
+        struct trace_event* te = __start___event_table;
+
+        while (te < __stop___event_table) {
+                if (te->id == id)
+                        return te->count;
+                te++;
+        }
+        return 0;
+}
+
+#endif
+
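
Note: ft_enable_event() and ft_disable_event() toggle a site by rewriting the one-byte displacement of its leading short jump (opcode 0xeb): writing 0 makes the jump fall through into the call, while writing end_addr - start_addr - BYTE_JUMP_LEN restores the skip over it, and the per-entry count makes enabling reference-counted. A usage sketch, assuming a caller that honors the "exclusive access to the event table" requirement and reusing the hypothetical id 170 from the sketch above:

static void toggle_example(void)
{
        int sites;

        /* Only the 0 -> 1 transition of an entry's count patches the code;
         * the return value is the number of table entries with this id. */
        sites = ft_enable_event(170);
        printk(KERN_DEBUG "event 170 instrumented at %d sites\n", sites);

        if (ft_is_event_enabled(170))
                ft_disable_event(170);  /* 1 -> 0 restores the skip jump */
}
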
diff --git a/include/litmus/feather_trace.h b/include/litmus/feather_trace.h
index eef8af7a414e..7d27e763406f 100644
--- a/include/litmus/feather_trace.h
+++ b/include/litmus/feather_trace.h
@@ -1,6 +1,7 @@
 #ifndef _FEATHER_TRACE_H_
 #define _FEATHER_TRACE_H_
 
+#include <asm/atomic.h>
 #include <asm/feather_trace.h>
 
 int ft_enable_event(unsigned long id);
@@ -8,6 +9,17 @@ int ft_disable_event(unsigned long id);
 int ft_is_event_enabled(unsigned long id);
 int ft_disable_all_events(void);
 
+/* atomic_* functions are inline anyway */
+static inline int fetch_and_inc(int *val)
+{
+        return atomic_add_return(1, (atomic_t*) val) - 1;
+}
+
+static inline int fetch_and_dec(int *val)
+{
+        return atomic_sub_return(1, (atomic_t*) val) + 1;
+}
+
 #ifndef __ARCH_HAS_FEATHER_TRACE
 /* provide default implementation */
 
@@ -32,19 +44,6 @@ extern int ft_events[MAX_EVENTS];
 #define ft_event3(id, callback, p, p2, p3) \
         if (ft_events[id]) callback(id, p, p2, p3);
 
-#include <asm/atomic.h>
-
-static inline int fetch_and_inc(int *val)
-{
-        return atomic_add_return(1, (atomic_t*) val) - 1;
-}
-
-static inline int fetch_and_dec(int *val)
-{
-        return atomic_sub_return(1, (atomic_t*) val) + 1;
-}
-
 #endif
 
-
 #endif
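
Note: fetch_and_inc() and fetch_and_dec() move out of the !__ARCH_HAS_FEATHER_TRACE fallback so that architectures with their own ft_event implementation (now including x86-32) still get them. Both return the value held *before* the update, i.e. classic fetch-and-add built on atomic_add_return()/atomic_sub_return(). A small illustration of the semantics (assuming, as the helpers themselves do, that an int may be treated as an atomic_t):

static void fetch_and_demo(void)
{
        int slot = 0;
        int a, b, c;

        a = fetch_and_inc(&slot);  /* a == 0, slot == 1 */
        b = fetch_and_inc(&slot);  /* b == 1, slot == 2 */
        c = fetch_and_dec(&slot);  /* c == 2, slot == 1 */
        (void) a; (void) b; (void) c;
}
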
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index aae6ac27fe1b..e1b0c9712b5f 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -167,12 +167,12 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
         SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, do_sched_trace_task_switch_away, t)
 #define sched_trace_task_completion(t, forced) \
         SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, do_sched_trace_task_completion, t, \
-                forced)
+                (unsigned long) forced)
 #define sched_trace_task_block(t) \
         SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, do_sched_trace_task_block, t)
 #define sched_trace_task_resume(t) \
         SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_resume, t)
-
+/* when is a pointer, it does not need an explicit cast to unsigned long */
 #define sched_trace_sys_release(when) \
         SCHED_TRACE(SCHED_TRACE_BASE_ID + 9, do_sched_trace_sys_release, when)
 
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
index e8e0c7b6cc6a..b32c71180774 100644
--- a/include/litmus/trace.h
+++ b/include/litmus/trace.h
@@ -32,13 +32,13 @@ feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu)
 
 #define TIMESTAMP(id) ft_event0(id, save_timestamp)
 
-#define DTIMESTAMP(id, def) ft_event1(id, save_timestamp_def, def)
+#define DTIMESTAMP(id, def) ft_event1(id, save_timestamp_def, (unsigned long) def)
 
 #define TTIMESTAMP(id, task) \
         ft_event1(id, save_timestamp_task, (unsigned long) task)
 
 #define CTIMESTAMP(id, cpu) \
-        ft_event1(id, save_timestamp_cpu, cpu)
+        ft_event1(id, save_timestamp_cpu, (unsigned long) cpu)
 
 #else /* !CONFIG_SCHED_OVERHEAD_TRACE */
 
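
Note: as with the sched_trace_task_completion() change above, the casts added to DTIMESTAMP() and CTIMESTAMP() make the argument handed to ft_event1() an unsigned long, matching the second parameter declared by the save_timestamp_def()/save_timestamp_cpu() callbacks; TTIMESTAMP() already cast its task pointer. A hypothetical call site (the event id 100 and the surrounding function are illustrative only, not part of the patch):

static void schedule_probe(void)
{
        /* smp_processor_id() returns an int; the macro now widens it to
         * unsigned long before it reaches save_timestamp_cpu(). */
        CTIMESTAMP(100, smp_processor_id());
}
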