author    | Steven Rostedt <srostedt@redhat.com> | 2008-05-12 15:20:43 -0400
committer | Thomas Gleixner <tglx@linutronix.de> | 2008-05-23 14:33:35 -0400
commit    | 3c1720f00bb619302ba19d55986ab565e74d06db (patch)
tree      | d58aaa54bc8e7a465597f385de36204c3b0b9cf8 /arch/x86/kernel
parent    | dfa60aba04dae7833d75b2e2be124bb7cfb8239f (diff)
ftrace: move memory management out of arch code
This patch moves the memory management of the ftrace
records out of the arch code and into the generic code,
making the arch code simpler.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- | arch/x86/kernel/ftrace.c | 183
1 file changed, 29 insertions(+), 154 deletions(-)
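
After this patch, the arch file is reduced to a small set of hooks (ftrace_ip_converted(), ftrace_nop_replace(), ftrace_call_replace(), ftrace_modify_code() and ftrace_dyn_arch_init()) that the generic ftrace code is expected to drive. As a minimal sketch, assuming the generic side tracks call sites in some record structure of its own (struct traced_site and patch_site() below are hypothetical stand-ins, not the real kernel/trace/ftrace.c code), enabling tracing for one mcount call site could look like this:

```c
/*
 * Sketch only: drives the arch hooks that remain after this patch.
 * ftrace_nop_replace(), ftrace_call_replace() and ftrace_modify_code()
 * are taken from the diff below; the record type and caller here are
 * illustrative, not the actual kernel/trace/ftrace.c implementation.
 */
struct traced_site {                 /* hypothetical stand-in for the generic bookkeeping */
        unsigned long ip;            /* recorded address of the mcount call site */
        int failed;                  /* nonzero if patching this site ever faulted */
};

static void patch_site(struct traced_site *site, unsigned long tracer)
{
        unsigned char *old, *new;

        old = ftrace_nop_replace();                  /* 5-byte nop currently at the site */
        new = ftrace_call_replace(site->ip, tracer); /* "call tracer" encoded for site->ip */

        /*
         * The arch side swaps the bytes and reports a fault with a
         * nonzero return; as the diff notes, this is meant to run
         * under kstop_machine, so no locking is taken here.
         */
        site->failed = ftrace_modify_code(site->ip, old, new);
}
```

The bookkeeping that used to do this work in the arch file (struct ftrace_page, ENTRIES_PER_PAGE, ftrace_replace_code() and ftrace_shutdown_replenish()) is exactly what the diff below deletes.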
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 2e060c58b860..b69795efa226 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -23,25 +23,6 @@
 /* Long is fine, even if it is only 4 bytes ;-) */
 static long *ftrace_nop;
 
-struct ftrace_record {
-        struct dyn_ftrace rec;
-        int failed;
-} __attribute__((packed));
-
-struct ftrace_page {
-        struct ftrace_page *next;
-        int index;
-        struct ftrace_record records[];
-} __attribute__((packed));
-
-#define ENTRIES_PER_PAGE \
-        ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct ftrace_record))
-
-/* estimate from running different kernels */
-#define NR_TO_INIT 10000
-
-#define MCOUNT_ADDR ((long)(&mcount))
-
 union ftrace_code_union {
         char code[5];
         struct {
@@ -50,33 +31,41 @@ union ftrace_code_union {
         } __attribute__((packed));
 };
 
-static struct ftrace_page *ftrace_pages_start;
-static struct ftrace_page *ftrace_pages;
-
-notrace struct dyn_ftrace *ftrace_alloc_shutdown_node(unsigned long ip)
+notrace int ftrace_ip_converted(unsigned long ip)
 {
-        struct ftrace_record *rec;
         unsigned long save;
 
         ip -= CALL_BACK;
         save = *(long *)ip;
 
-        /* If this was already converted, skip it */
-        if (save == *ftrace_nop)
-                return NULL;
+        return save == *ftrace_nop;
+}
 
-        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
-                if (!ftrace_pages->next)
-                        return NULL;
-                ftrace_pages = ftrace_pages->next;
-        }
+static int notrace ftrace_calc_offset(long ip, long addr)
+{
+        return (int)(addr - ip);
+}
 
-        rec = &ftrace_pages->records[ftrace_pages->index++];
+notrace unsigned char *ftrace_nop_replace(void)
+{
+        return (char *)ftrace_nop;
+}
+
+notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+        static union ftrace_code_union calc;
 
-        return &rec->rec;
+        calc.e8 = 0xe8;
+        calc.offset = ftrace_calc_offset(ip, addr);
+
+        /*
+         * No locking needed, this must be called via kstop_machine
+         * which in essence is like running on a uniprocessor machine.
+         */
+        return calc.code;
 }
 
-static int notrace
+notrace int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
                    unsigned char *new_code)
 {
@@ -86,6 +75,9 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
         unsigned char newch = new_code[4];
         int faulted = 0;
 
+        /* move the IP back to the start of the call */
+        ip -= CALL_BACK;
+
         /*
          * Note: Due to modules and __init, code can
          * disappear and change, we need to protect against faulting
@@ -117,129 +109,12 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
         return faulted;
 }
 
-static int notrace ftrace_calc_offset(long ip)
-{
-        return (int)(MCOUNT_ADDR - ip);
-}
-
-notrace void ftrace_code_disable(struct dyn_ftrace *rec)
-{
-        unsigned long ip;
-        union ftrace_code_union save;
-        struct ftrace_record *r =
-                container_of(rec, struct ftrace_record, rec);
-
-        ip = rec->ip;
-
-        save.e8 = 0xe8;
-        save.offset = ftrace_calc_offset(ip);
-
-        /* move the IP back to the start of the call */
-        ip -= CALL_BACK;
-
-        r->failed = ftrace_modify_code(ip, save.code, (char *)ftrace_nop);
-}
-
-static void notrace ftrace_replace_code(int saved)
-{
-        unsigned char *new = NULL, *old = NULL;
-        struct ftrace_record *rec;
-        struct ftrace_page *pg;
-        unsigned long ip;
-        int i;
-
-        if (saved)
-                old = (char *)ftrace_nop;
-        else
-                new = (char *)ftrace_nop;
-
-        for (pg = ftrace_pages_start; pg; pg = pg->next) {
-                for (i = 0; i < pg->index; i++) {
-                        union ftrace_code_union calc;
-                        rec = &pg->records[i];
-
-                        /* don't modify code that has already faulted */
-                        if (rec->failed)
-                                continue;
-
-                        ip = rec->rec.ip;
-
-                        calc.e8 = 0xe8;
-                        calc.offset = ftrace_calc_offset(ip);
-
-                        if (saved)
-                                new = calc.code;
-                        else
-                                old = calc.code;
-
-                        ip -= CALL_BACK;
-
-                        rec->failed = ftrace_modify_code(ip, old, new);
-                }
-        }
-
-}
-
-notrace void ftrace_startup_code(void)
-{
-        ftrace_replace_code(1);
-}
-
-notrace void ftrace_shutdown_code(void)
-{
-        ftrace_replace_code(0);
-}
-
-notrace void ftrace_shutdown_replenish(void)
-{
-        if (ftrace_pages->next)
-                return;
-
-        /* allocate another page */
-        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
-}
-
-notrace int __init ftrace_shutdown_arch_init(void)
+int __init ftrace_dyn_arch_init(void)
 {
         const unsigned char *const *noptable = find_nop_table();
-        struct ftrace_page *pg;
-        int cnt;
-        int i;
 
         ftrace_nop = (unsigned long *)noptable[CALL_BACK];
 
-        /* allocate a few pages */
-        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
-        if (!ftrace_pages_start)
-                return -1;
-
-        /*
-         * Allocate a few more pages.
-         *
-         * TODO: have some parser search vmlinux before
-         * final linking to find all calls to ftrace.
-         * Then we can:
-         *  a) know how many pages to allocate.
-         *     and/or
-         *  b) set up the table then.
-         *
-         * The dynamic code is still necessary for
-         * modules.
-         */
-
-        pg = ftrace_pages = ftrace_pages_start;
-
-        cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
-
-        for (i = 0; i < cnt; i++) {
-                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
-
-                /* If we fail, we'll try later anyway */
-                if (!pg->next)
-                        break;
-
-                pg = pg->next;
-        }
-
         return 0;
 }
+