aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorAndrea Bastoni <bastoni@cs.unc.edu>2010-01-19 19:38:14 -0500
committerAndrea Bastoni <bastoni@cs.unc.edu>2010-05-29 17:25:02 -0400
commitd1a840d7194fdd09c1bd9977e30fd391ef2a7526 (patch)
treeed77a18c98a2581ee7d7ed45a9b4305c7a3c7119 /arch
parent07ae7efcb81f95eb8e870cad21c7ba72573af7e8 (diff)
[ported from 2008.3] Add Feather-Trace x86_32 architecture-dependent code
- [ported from 2008.3] Add x86_32 architecture-dependent code.
- Add the infrastructure for x86_32/x86_64 integration.
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/feather_trace.h13
-rw-r--r--arch/x86/include/asm/feather_trace_32.h80
-rw-r--r--arch/x86/kernel/Makefile2
-rw-r--r--arch/x86/kernel/ft_event.c112
4 files changed, 204 insertions, 3 deletions
diff --git a/arch/x86/include/asm/feather_trace.h b/arch/x86/include/asm/feather_trace.h
index f60fbed07afb..86a4303fce7a 100644
--- a/arch/x86/include/asm/feather_trace.h
+++ b/arch/x86/include/asm/feather_trace.h
#ifndef _ARCH_FEATHER_TRACE_H
#define _ARCH_FEATHER_TRACE_H

/* for __native_read_tsc() */
#include <asm/msr.h>

/* Return the CPU's time stamp counter as a 64-bit value.
 * Delegates to the kernel's __native_read_tsc() instead of open-coding
 * "rdtsc", so the same source works for both x86_32 and x86_64. */
static inline unsigned long long ft_timestamp(void)
{
	return __native_read_tsc();
}

/* Pull in the architecture-width-specific event hooks. */
#ifdef CONFIG_X86_32
#include "feather_trace_32.h"
#else
/* not ready for integration yet */
//#include "feather_trace_64.h"
#endif

#endif
diff --git a/arch/x86/include/asm/feather_trace_32.h b/arch/x86/include/asm/feather_trace_32.h
new file mode 100644
index 000000000000..192cd09b7850
--- /dev/null
+++ b/arch/x86/include/asm/feather_trace_32.h
@@ -0,0 +1,80 @@
/* Do not directly include this file. Include feather_trace.h instead */

/* Force the cdecl calling convention (no register arguments) so that the
 * hand-written asm sequences below can pass arguments on the stack. */
#define feather_callback __attribute__((regparm(0)))

/*
 * make the compiler reload any register that is not saved in
 * a cdecl function call
 */
#define CLOBBER_LIST "memory", "cc", "eax", "ecx", "edx"

/* Each ft_event* macro emits a call site that starts with a 2-byte short
 * jump ("1: jmp 2f") over the tracing code.  Enabling an event rewrites
 * the jump's rel8 offset to 0 at runtime so execution falls through into
 * the call.  Every site also records a table entry in the "__event_table"
 * section: { id, count, start_addr (label 1), end_addr (label 2) } —
 * the layout struct trace_event in ft_event.c relies on. */
#define ft_event(id, callback)                                  \
        __asm__ __volatile__(                                   \
            "1: jmp 2f                                    \n\t" \
	    " call " #callback "                         \n\t" \
            ".section __event_table, \"aw\"               \n\t" \
            ".long " #id  ", 0, 1b, 2f                    \n\t" \
            ".previous                                    \n\t" \
            "2:                                           \n\t" \
        : : : CLOBBER_LIST)

/* As ft_event, but pushes the event id as the callback's only argument. */
#define ft_event0(id, callback)                                 \
        __asm__ __volatile__(                                   \
            "1: jmp 2f                                    \n\t" \
	    " subl $4, %%esp                              \n\t" \
            " movl $" #id ", (%%esp)                      \n\t" \
	    " call " #callback "                          \n\t" \
	    " addl $4, %%esp                              \n\t" \
            ".section __event_table, \"aw\"               \n\t" \
            ".long " #id  ", 0, 1b, 2f                    \n\t" \
            ".previous                                    \n\t" \
            "2:                                           \n\t" \
        : : : CLOBBER_LIST)

/* As ft_event0, with one extra argument pushed after the id. */
#define ft_event1(id, callback, param)                          \
        __asm__ __volatile__(                                   \
            "1: jmp 2f                                    \n\t" \
	    " subl $8, %%esp                              \n\t" \
	    " movl %0, 4(%%esp)                           \n\t" \
            " movl $" #id ", (%%esp)                      \n\t" \
	    " call " #callback "                          \n\t" \
	    " addl $8, %%esp                              \n\t" \
            ".section __event_table, \"aw\"               \n\t" \
            ".long " #id  ", 0, 1b, 2f                    \n\t" \
            ".previous                                    \n\t" \
            "2:                                           \n\t" \
        : : "r" (param)  : CLOBBER_LIST)

/* As ft_event1, with two extra arguments. */
#define ft_event2(id, callback, param, param2)                  \
        __asm__ __volatile__(                                   \
            "1: jmp 2f                                    \n\t" \
	    " subl $12, %%esp                             \n\t" \
	    " movl %1, 8(%%esp)                           \n\t" \
	    " movl %0, 4(%%esp)                           \n\t" \
            " movl $" #id ", (%%esp)                      \n\t" \
	    " call " #callback "                          \n\t" \
	    " addl $12, %%esp                             \n\t" \
            ".section __event_table, \"aw\"               \n\t" \
            ".long " #id  ", 0, 1b, 2f                    \n\t" \
            ".previous                                    \n\t" \
            "2:                                           \n\t" \
        : : "r" (param), "r" (param2)    : CLOBBER_LIST)

/* As ft_event1, with three extra arguments. */
#define ft_event3(id, callback, p, p2, p3)                      \
        __asm__ __volatile__(                                   \
            "1: jmp 2f                                    \n\t" \
	    " subl $16, %%esp                             \n\t" \
	    " movl %2, 12(%%esp)                          \n\t" \
	    " movl %1, 8(%%esp)                           \n\t" \
	    " movl %0, 4(%%esp)                           \n\t" \
            " movl $" #id ", (%%esp)                      \n\t" \
	    " call " #callback "                          \n\t" \
	    " addl $16, %%esp                             \n\t" \
            ".section __event_table, \"aw\"               \n\t" \
            ".long " #id  ", 0, 1b, 2f                    \n\t" \
            ".previous                                    \n\t" \
            "2:                                           \n\t" \
        : : "r" (p), "r" (p2), "r" (p3)  : CLOBBER_LIST)

/* Advertise to litmus/feather_trace.h that this arch supplies the
 * binary-rewriting implementation above. */
#define __ARCH_HAS_FEATHER_TRACE
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index d8e5d0cdd678..a99b34d1b3b8 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -117,6 +117,8 @@ obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
117 117
118obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o 118obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
119 119
120obj-$(CONFIG_FEATHER_TRACE) += ft_event.o
121
120### 122###
121# 64 bit specific files 123# 64 bit specific files
122ifeq ($(CONFIG_X86_64),y) 124ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/ft_event.c b/arch/x86/kernel/ft_event.c
new file mode 100644
index 000000000000..e07ee30dfff9
--- /dev/null
+++ b/arch/x86/kernel/ft_event.c
@@ -0,0 +1,112 @@
#include <linux/types.h>

#include <litmus/feather_trace.h>

#ifdef __ARCH_HAS_FEATHER_TRACE
/* the feather trace management functions assume
 * exclusive access to the event table
 */

/* opcode of the x86 short jump (rel8) that each ft_event* site begins
 * with, and the encoded length of that instruction */
#define BYTE_JUMP      0xeb
#define BYTE_JUMP_LEN  0x02

/* for each event, there is an entry in the event table
 * (must match the record emitted into the __event_table section by the
 * ft_event* macros in asm/feather_trace_32.h) */
struct trace_event {
	long 	id;         /* event identifier */
	long	count;      /* enable reference count; 0 == disabled */
	long	start_addr; /* address of the "1:" label (the jmp) */
	long	end_addr;   /* address of the "2:" label (past the call) */
};

/* bounds of the __event_table section, provided by the linker */
extern struct trace_event  __start___event_table[];
extern struct trace_event  __stop___event_table[];
25int ft_enable_event(unsigned long id)
26{
27 struct trace_event* te = __start___event_table;
28 int count = 0;
29 char* delta;
30 unsigned char* instr;
31
32 while (te < __stop___event_table) {
33 if (te->id == id && ++te->count == 1) {
34 instr = (unsigned char*) te->start_addr;
35 /* make sure we don't clobber something wrong */
36 if (*instr == BYTE_JUMP) {
37 delta = (((unsigned char*) te->start_addr) + 1);
38 *delta = 0;
39 }
40 }
41 if (te->id == id)
42 count++;
43 te++;
44 }
45
46 printk(KERN_DEBUG "ft_enable_event: enabled %d events\n", count);
47 return count;
48}
49
50int ft_disable_event(unsigned long id)
51{
52 struct trace_event* te = __start___event_table;
53 int count = 0;
54 char* delta;
55 unsigned char* instr;
56
57 while (te < __stop___event_table) {
58 if (te->id == id && --te->count == 0) {
59 instr = (unsigned char*) te->start_addr;
60 if (*instr == BYTE_JUMP) {
61 delta = (((unsigned char*) te->start_addr) + 1);
62 *delta = te->end_addr - te->start_addr -
63 BYTE_JUMP_LEN;
64 }
65 }
66 if (te->id == id)
67 count++;
68 te++;
69 }
70
71 printk(KERN_DEBUG "ft_disable_event: disabled %d events\n", count);
72 return count;
73}
74
75int ft_disable_all_events(void)
76{
77 struct trace_event* te = __start___event_table;
78 int count = 0;
79 char* delta;
80 unsigned char* instr;
81
82 while (te < __stop___event_table) {
83 if (te->count) {
84 instr = (unsigned char*) te->start_addr;
85 if (*instr == BYTE_JUMP) {
86 delta = (((unsigned char*) te->start_addr)
87 + 1);
88 *delta = te->end_addr - te->start_addr -
89 BYTE_JUMP_LEN;
90 te->count = 0;
91 count++;
92 }
93 }
94 te++;
95 }
96 return count;
97}
98
99int ft_is_event_enabled(unsigned long id)
100{
101 struct trace_event* te = __start___event_table;
102
103 while (te < __stop___event_table) {
104 if (te->id == id)
105 return te->count;
106 te++;
107 }
108 return 0;
109}
110
111#endif
112