Diffstat (limited to 'fs/xfs/support')
-rw-r--r--	fs/xfs/support/debug.h	 18
-rw-r--r--	fs/xfs/support/ktrace.c	323
-rw-r--r--	fs/xfs/support/ktrace.h	 85
3 files changed, 0 insertions, 426 deletions
diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h
index 6f4fd37c67af..d2d20462fd4f 100644
--- a/fs/xfs/support/debug.h
+++ b/fs/xfs/support/debug.h
@@ -41,10 +41,6 @@ extern void assfail(char *expr, char *f, int l);
 # define STATIC static noinline
 #endif
 
-#ifndef STATIC_INLINE
-# define STATIC_INLINE static inline
-#endif
-
 #else /* DEBUG */
 
 #define ASSERT(expr) \
@@ -54,19 +50,5 @@ extern void assfail(char *expr, char *f, int l);
 # define STATIC noinline
 #endif
 
-/*
- * We stop inlining of inline functions in debug mode.
- * Unfortunately, this means static inline in header files
- * get multiple definitions, so they need to remain static.
- * This then gives tonnes of warnings about unused but defined
- * functions, so we need to add the unused attribute to prevent
- * these spurious warnings.
- */
-#ifndef STATIC_INLINE
-# define STATIC_INLINE static __attribute__ ((unused)) noinline
-#endif
-
 #endif /* DEBUG */
-
-
 #endif /* __XFS_SUPPORT_DEBUG_H__ */
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c
deleted file mode 100644
index 2d494c26717f..000000000000
--- a/fs/xfs/support/ktrace.c
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include <xfs.h>
-
-static kmem_zone_t *ktrace_hdr_zone;
-static kmem_zone_t *ktrace_ent_zone;
-static int ktrace_zentries;
-
-void __init
-ktrace_init(int zentries)
-{
-	ktrace_zentries = roundup_pow_of_two(zentries);
-
-	ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
-					"ktrace_hdr");
-	ASSERT(ktrace_hdr_zone);
-
-	ktrace_ent_zone = kmem_zone_init(ktrace_zentries
-					* sizeof(ktrace_entry_t),
-					"ktrace_ent");
-	ASSERT(ktrace_ent_zone);
-}
-
-void __exit
-ktrace_uninit(void)
-{
-	kmem_zone_destroy(ktrace_hdr_zone);
-	kmem_zone_destroy(ktrace_ent_zone);
-}
-
-/*
- * ktrace_alloc()
- *
- * Allocate a ktrace header and enough buffering for the given
- * number of entries. Round the number of entries up to a
- * power of 2 so we can do fast masking to get the index from
- * the atomic index counter.
- */
-ktrace_t *
-ktrace_alloc(int nentries, unsigned int __nocast sleep)
-{
-	ktrace_t	*ktp;
-	ktrace_entry_t	*ktep;
-	int		entries;
-
-	ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);
-
-	if (ktp == (ktrace_t*)NULL) {
-		/*
-		 * KM_SLEEP callers don't expect failure.
-		 */
-		if (sleep & KM_SLEEP)
-			panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
-
-		return NULL;
-	}
-
-	/*
-	 * Special treatment for buffers with the ktrace_zentries entries
-	 */
-	entries = roundup_pow_of_two(nentries);
-	if (entries == ktrace_zentries) {
-		ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
-							sleep);
-	} else {
-		ktep = (ktrace_entry_t*)kmem_zalloc((entries * sizeof(*ktep)),
-							sleep | KM_LARGE);
-	}
-
-	if (ktep == NULL) {
-		/*
-		 * KM_SLEEP callers don't expect failure.
-		 */
-		if (sleep & KM_SLEEP)
-			panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
-
-		kmem_free(ktp);
-
-		return NULL;
-	}
-
-	ktp->kt_entries = ktep;
-	ktp->kt_nentries = entries;
-	ASSERT(is_power_of_2(entries));
-	ktp->kt_index_mask = entries - 1;
-	atomic_set(&ktp->kt_index, 0);
-	ktp->kt_rollover = 0;
-	return ktp;
-}
-
-
-/*
- * ktrace_free()
- *
- * Free up the ktrace header and buffer. It is up to the caller
- * to ensure that no-one is referencing it.
- */
-void
-ktrace_free(ktrace_t *ktp)
-{
-	if (ktp == (ktrace_t *)NULL)
-		return;
-
-	/*
-	 * Special treatment for the Vnode trace buffer.
-	 */
-	if (ktp->kt_nentries == ktrace_zentries)
-		kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
-	else
-		kmem_free(ktp->kt_entries);
-
-	kmem_zone_free(ktrace_hdr_zone, ktp);
-}
-
-
-/*
- * Enter the given values into the "next" entry in the trace buffer.
- * kt_index is always the index of the next entry to be filled.
- */
-void
-ktrace_enter(
-	ktrace_t	*ktp,
-	void		*val0,
-	void		*val1,
-	void		*val2,
-	void		*val3,
-	void		*val4,
-	void		*val5,
-	void		*val6,
-	void		*val7,
-	void		*val8,
-	void		*val9,
-	void		*val10,
-	void		*val11,
-	void		*val12,
-	void		*val13,
-	void		*val14,
-	void		*val15)
-{
-	int		index;
-	ktrace_entry_t	*ktep;
-
-	ASSERT(ktp != NULL);
-
-	/*
-	 * Grab an entry by pushing the index up to the next one.
-	 */
-	index = atomic_add_return(1, &ktp->kt_index);
-	index = (index - 1) & ktp->kt_index_mask;
-	if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
-		ktp->kt_rollover = 1;
-
-	ASSERT((index >= 0) && (index < ktp->kt_nentries));
-
-	ktep = &(ktp->kt_entries[index]);
-
-	ktep->val[0] = val0;
-	ktep->val[1] = val1;
-	ktep->val[2] = val2;
-	ktep->val[3] = val3;
-	ktep->val[4] = val4;
-	ktep->val[5] = val5;
-	ktep->val[6] = val6;
-	ktep->val[7] = val7;
-	ktep->val[8] = val8;
-	ktep->val[9] = val9;
-	ktep->val[10] = val10;
-	ktep->val[11] = val11;
-	ktep->val[12] = val12;
-	ktep->val[13] = val13;
-	ktep->val[14] = val14;
-	ktep->val[15] = val15;
-}
-
-/*
- * Return the number of entries in the trace buffer.
- */
-int
-ktrace_nentries(
-	ktrace_t	*ktp)
-{
-	int	index;
-	if (ktp == NULL)
-		return 0;
-
-	index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
-	return (ktp->kt_rollover ? ktp->kt_nentries : index);
-}
-
-/*
- * ktrace_first()
- *
- * This is used to find the start of the trace buffer.
- * In conjunction with ktrace_next() it can be used to
- * iterate through the entire trace buffer. This code does
- * not do any locking because it is assumed that it is called
- * from the debugger.
- *
- * The caller must pass in a pointer to a ktrace_snap
- * structure in which we will keep some state used to
- * iterate through the buffer. This state must not touched
- * by any code outside of this module.
- */
-ktrace_entry_t *
-ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
-{
-	ktrace_entry_t	*ktep;
-	int		index;
-	int		nentries;
-
-	if (ktp->kt_rollover)
-		index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
-	else
-		index = 0;
-
-	ktsp->ks_start = index;
-	ktep = &(ktp->kt_entries[index]);
-
-	nentries = ktrace_nentries(ktp);
-	index++;
-	if (index < nentries) {
-		ktsp->ks_index = index;
-	} else {
-		ktsp->ks_index = 0;
-		if (index > nentries)
-			ktep = NULL;
-	}
-	return ktep;
-}
-
-/*
- * ktrace_next()
- *
- * This is used to iterate through the entries of the given
- * trace buffer. The caller must pass in the ktrace_snap_t
- * structure initialized by ktrace_first(). The return value
- * will be either a pointer to the next ktrace_entry or NULL
- * if all of the entries have been traversed.
- */
-ktrace_entry_t *
-ktrace_next(
-	ktrace_t	*ktp,
-	ktrace_snap_t	*ktsp)
-{
-	int		index;
-	ktrace_entry_t	*ktep;
-
-	index = ktsp->ks_index;
-	if (index == ktsp->ks_start) {
-		ktep = NULL;
-	} else {
-		ktep = &ktp->kt_entries[index];
-	}
-
-	index++;
-	if (index == ktrace_nentries(ktp)) {
-		ktsp->ks_index = 0;
-	} else {
-		ktsp->ks_index = index;
-	}
-
-	return ktep;
-}
-
-/*
- * ktrace_skip()
- *
- * Skip the next "count" entries and return the entry after that.
- * Return NULL if this causes us to iterate past the beginning again.
- */
-ktrace_entry_t *
-ktrace_skip(
-	ktrace_t	*ktp,
-	int		count,
-	ktrace_snap_t	*ktsp)
-{
-	int		index;
-	int		new_index;
-	ktrace_entry_t	*ktep;
-	int		nentries = ktrace_nentries(ktp);
-
-	index = ktsp->ks_index;
-	new_index = index + count;
-	while (new_index >= nentries) {
-		new_index -= nentries;
-	}
-	if (index == ktsp->ks_start) {
-		/*
-		 * We've iterated around to the start, so we're done.
-		 */
-		ktep = NULL;
-	} else if ((new_index < index) && (index < ktsp->ks_index)) {
-		/*
-		 * We've skipped past the start again, so we're done.
-		 */
-		ktep = NULL;
-		ktsp->ks_index = ktsp->ks_start;
-	} else {
-		ktep = &(ktp->kt_entries[new_index]);
-		new_index++;
-		if (new_index == nentries) {
-			ktsp->ks_index = 0;
-		} else {
-			ktsp->ks_index = new_index;
-		}
-	}
-	return ktep;
-}
diff --git a/fs/xfs/support/ktrace.h b/fs/xfs/support/ktrace.h
deleted file mode 100644
index 741d6947ca60..000000000000
--- a/fs/xfs/support/ktrace.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_SUPPORT_KTRACE_H__
-#define __XFS_SUPPORT_KTRACE_H__
-
-/*
- * Trace buffer entry structure.
- */
-typedef struct ktrace_entry {
-	void	*val[16];
-} ktrace_entry_t;
-
-/*
- * Trace buffer header structure.
- */
-typedef struct ktrace {
-	int		kt_nentries;	/* number of entries in trace buf */
-	atomic_t	kt_index;	/* current index in entries */
-	unsigned int	kt_index_mask;
-	int		kt_rollover;
-	ktrace_entry_t	*kt_entries;	/* buffer of entries */
-} ktrace_t;
-
-/*
- * Trace buffer snapshot structure.
- */
-typedef struct ktrace_snap {
-	int		ks_start;	/* kt_index at time of snap */
-	int		ks_index;	/* current index */
-} ktrace_snap_t;
-
-
-#ifdef CONFIG_XFS_TRACE
-
-extern void ktrace_init(int zentries);
-extern void ktrace_uninit(void);
-
-extern ktrace_t *ktrace_alloc(int, unsigned int __nocast);
-extern void ktrace_free(ktrace_t *);
-
-extern void ktrace_enter(
-	ktrace_t	*,
-	void		*,
-	void		*,
-	void		*,
-	void		*,
-	void		*,
-	void		*,
-	void		*,
-	void		*,
-	void		*,
-	void		*,
-	void		*,
-	void		*,
-	void		*,
-	void		*,
-	void		*,
-	void		*);
-
-extern ktrace_entry_t *ktrace_first(ktrace_t *, ktrace_snap_t *);
-extern int ktrace_nentries(ktrace_t *);
-extern ktrace_entry_t *ktrace_next(ktrace_t *, ktrace_snap_t *);
-extern ktrace_entry_t *ktrace_skip(ktrace_t *, int, ktrace_snap_t *);
-
-#else
-#define ktrace_init(x)	do { } while (0)
-#define ktrace_uninit()	do { } while (0)
-#endif /* CONFIG_XFS_TRACE */
-
-#endif /* __XFS_SUPPORT_KTRACE_H__ */
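
Aside (not part of the patch above): the removed ktrace_enter() claims a slot in its trace buffer by atomically incrementing kt_index and masking the result with kt_index_mask, which only works because ktrace_alloc() rounds the entry count up to a power of two. The standalone C11 sketch below reproduces just that indexing scheme; the names (RING_ENTRIES, ring_claim_slot() and so on) are hypothetical and exist only for this illustration, not in the kernel sources.

#include <stdatomic.h>
#include <stdio.h>

#define RING_ENTRIES	8			/* must be a power of two */
#define RING_MASK	(RING_ENTRIES - 1)	/* masking replaces a modulo */

static atomic_int ring_index;			/* monotonically increasing counter */
static int ring[RING_ENTRIES];			/* the trace entries themselves */

/* Claim the next slot: bump the shared counter, then mask it into range. */
static int ring_claim_slot(void)
{
	int raw = atomic_fetch_add(&ring_index, 1);
	return raw & RING_MASK;
}

int main(void)
{
	/* Record more events than there are slots; older entries are overwritten. */
	for (int ev = 0; ev < 20; ev++)
		ring[ring_claim_slot()] = ev;

	for (int i = 0; i < RING_ENTRIES; i++)
		printf("slot %d holds event %d\n", i, ring[i]);
	return 0;
}

With RING_ENTRIES = 8 and 20 recorded events, slots 0-3 end up holding events 16-19 and slots 4-7 hold events 12-15: the buffer has wrapped and the oldest surviving entry sits just past the newest one, which is the wrap-around state the removed code flagged with kt_rollover.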