author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/oprofile
tag        Linux-2.6.12-rc2 (v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/oprofile')
 -rw-r--r--  drivers/oprofile/buffer_sync.c     547
 -rw-r--r--  drivers/oprofile/buffer_sync.h      22
 -rw-r--r--  drivers/oprofile/cpu_buffer.c      307
 -rw-r--r--  drivers/oprofile/cpu_buffer.h       57
 -rw-r--r--  drivers/oprofile/event_buffer.c    187
 -rw-r--r--  drivers/oprofile/event_buffer.h     48
 -rw-r--r--  drivers/oprofile/oprof.c           188
 -rw-r--r--  drivers/oprofile/oprof.h            39
 -rw-r--r--  drivers/oprofile/oprofile_files.c  135
 -rw-r--r--  drivers/oprofile/oprofile_stats.c   74
 -rw-r--r--  drivers/oprofile/oprofile_stats.h   33
 -rw-r--r--  drivers/oprofile/oprofilefs.c      299
 -rw-r--r--  drivers/oprofile/timer_int.c        46
13 files changed, 1982 insertions(+), 0 deletions(-)
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
new file mode 100644
index 000000000000..55720dc6ec43
--- /dev/null
+++ b/drivers/oprofile/buffer_sync.c
@@ -0,0 +1,547 @@
/**
 * @file buffer_sync.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */

#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"

static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_t marked_cpus = CPU_MASK_NONE;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);


/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 */
static int task_free_notify(struct notifier_block * self, unsigned long val, void * data)
{
        struct task_struct * task = data;
        spin_lock(&task_mortuary);
        list_add(&task->tasks, &dying_tasks);
        spin_unlock(&task_mortuary);
        return NOTIFY_OK;
}


/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int task_exit_notify(struct notifier_block * self, unsigned long val, void * data)
{
        /* To avoid latency problems, we only process the current CPU,
         * hoping that most samples for the task are on this CPU
         */
        sync_buffer(_smp_processor_id());
        return 0;
}


/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact, it's a QoI issue
 * only.
 */
static int munmap_notify(struct notifier_block * self, unsigned long val, void * data)
{
        unsigned long addr = (unsigned long)data;
        struct mm_struct * mm = current->mm;
        struct vm_area_struct * mpnt;

        down_read(&mm->mmap_sem);

        mpnt = find_vma(mm, addr);
        if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
                up_read(&mm->mmap_sem);
                /* To avoid latency problems, we only process the current CPU,
                 * hoping that most samples for the task are on this CPU
                 */
                sync_buffer(_smp_processor_id());
                return 0;
        }

        up_read(&mm->mmap_sem);
        return 0;
}


/* We need to be told about new modules so we don't attribute to a previously
 * loaded module, or drop the samples on the floor.
 */
static int module_load_notify(struct notifier_block * self, unsigned long val, void * data)
{
#ifdef CONFIG_MODULES
        if (val != MODULE_STATE_COMING)
                return 0;

        /* FIXME: should we process all CPU buffers ? */
        down(&buffer_sem);
        add_event_entry(ESCAPE_CODE);
        add_event_entry(MODULE_LOADED_CODE);
        up(&buffer_sem);
#endif
        return 0;
}


static struct notifier_block task_free_nb = {
        .notifier_call  = task_free_notify,
};

static struct notifier_block task_exit_nb = {
        .notifier_call  = task_exit_notify,
};

static struct notifier_block munmap_nb = {
        .notifier_call  = munmap_notify,
};

static struct notifier_block module_load_nb = {
        .notifier_call = module_load_notify,
};


static void end_sync(void)
{
        end_cpu_work();
        /* make sure we don't leak task structs */
        process_task_mortuary();
        process_task_mortuary();
}


int sync_start(void)
{
        int err;

        start_cpu_work();

        err = task_handoff_register(&task_free_nb);
        if (err)
                goto out1;
        err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
        if (err)
                goto out2;
        err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
        if (err)
                goto out3;
        err = register_module_notifier(&module_load_nb);
        if (err)
                goto out4;

out:
        return err;
out4:
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
        task_handoff_unregister(&task_free_nb);
out1:
        end_sync();
        goto out;
}


void sync_stop(void)
{
        unregister_module_notifier(&module_load_nb);
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
        task_handoff_unregister(&task_free_nb);
        end_sync();
}


/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
static inline unsigned long fast_get_dcookie(struct dentry * dentry,
        struct vfsmount * vfsmnt)
{
        unsigned long cookie;

        if (dentry->d_cookie)
                return (unsigned long)dentry;
        get_dcookie(dentry, vfsmnt, &cookie);
        return cookie;
}


/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications
 */
static unsigned long get_exec_dcookie(struct mm_struct * mm)
{
        unsigned long cookie = 0;
        struct vm_area_struct * vma;

        if (!mm)
                goto out;

        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (!vma->vm_file)
                        continue;
                if (!(vma->vm_flags & VM_EXECUTABLE))
                        continue;
                cookie = fast_get_dcookie(vma->vm_file->f_dentry,
                        vma->vm_file->f_vfsmnt);
                break;
        }

out:
        return cookie;
}


/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 */
static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, off_t * offset)
{
        unsigned long cookie = 0;
        struct vm_area_struct * vma;

        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

                if (!vma->vm_file)
                        continue;

                if (addr < vma->vm_start || addr >= vma->vm_end)
                        continue;

                cookie = fast_get_dcookie(vma->vm_file->f_dentry,
                        vma->vm_file->f_vfsmnt);
                *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr - vma->vm_start;
                break;
        }

        return cookie;
}


static unsigned long last_cookie = ~0UL;

static void add_cpu_switch(int i)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CPU_SWITCH_CODE);
        add_event_entry(i);
        last_cookie = ~0UL;
}

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
        add_event_entry(ESCAPE_CODE);
        if (in_kernel)
                add_event_entry(KERNEL_ENTER_SWITCH_CODE);
        else
                add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

static void
add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CTX_SWITCH_CODE);
        add_event_entry(task->pid);
        add_event_entry(cookie);
        /* Another code for daemon back-compat */
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CTX_TGID_CODE);
        add_event_entry(task->tgid);
}


static void add_cookie_switch(unsigned long cookie)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(COOKIE_SWITCH_CODE);
        add_event_entry(cookie);
}


static void add_trace_begin(void)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(TRACE_BEGIN_CODE);
}


static void add_sample_entry(unsigned long offset, unsigned long event)
{
        add_event_entry(offset);
        add_event_entry(event);
}


static int add_us_sample(struct mm_struct * mm, struct op_sample * s)
{
        unsigned long cookie;
        off_t offset;

        cookie = lookup_dcookie(mm, s->eip, &offset);

        if (!cookie) {
                atomic_inc(&oprofile_stats.sample_lost_no_mapping);
                return 0;
        }

        if (cookie != last_cookie) {
                add_cookie_switch(cookie);
                last_cookie = cookie;
        }

        add_sample_entry(offset, s->event);

        return 1;
}


/* Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace.
 */
static int
add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
{
        if (in_kernel) {
                add_sample_entry(s->eip, s->event);
                return 1;
        } else if (mm) {
                return add_us_sample(mm, s);
        } else {
                atomic_inc(&oprofile_stats.sample_lost_no_mm);
        }
        return 0;
}


static void release_mm(struct mm_struct * mm)
{
        if (!mm)
                return;
        up_read(&mm->mmap_sem);
        mmput(mm);
}


static struct mm_struct * take_tasks_mm(struct task_struct * task)
{
        struct mm_struct * mm = get_task_mm(task);
        if (mm)
                down_read(&mm->mmap_sem);
        return mm;
}


static inline int is_code(unsigned long val)
{
        return val == ESCAPE_CODE;
}


/* "acquire" as many cpu buffer slots as we can */
static unsigned long get_slots(struct oprofile_cpu_buffer * b)
{
        unsigned long head = b->head_pos;
        unsigned long tail = b->tail_pos;

        /*
         * Subtle. This resets the persistent last_task
         * and in_kernel values used for switching notes.
         * BUT, there is a small window between reading
         * head_pos, and this call, that means samples
         * can appear at the new head position, but not
         * be prefixed with the notes for switching
         * kernel mode or a task switch. This small hole
         * can lead to mis-attribution or samples where
         * we don't know if it's in the kernel or not,
         * at the start of an event buffer.
         */
        cpu_buffer_reset(b);

        if (head >= tail)
                return head - tail;

        return head + (b->buffer_size - tail);
}


static void increment_tail(struct oprofile_cpu_buffer * b)
{
        unsigned long new_tail = b->tail_pos + 1;

        rmb();

        if (new_tail < b->buffer_size)
                b->tail_pos = new_tail;
        else
                b->tail_pos = 0;
}


/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
        struct list_head * pos;
        struct list_head * pos2;
        struct task_struct * task;

        spin_lock(&task_mortuary);

        list_for_each_safe(pos, pos2, &dead_tasks) {
                task = list_entry(pos, struct task_struct, tasks);
                list_del(&task->tasks);
                free_task(task);
        }

        list_for_each_safe(pos, pos2, &dying_tasks) {
                task = list_entry(pos, struct task_struct, tasks);
                list_del(&task->tasks);
                list_add_tail(&task->tasks, &dead_tasks);
        }

        spin_unlock(&task_mortuary);
}


static void mark_done(int cpu)
{
        int i;

        cpu_set(cpu, marked_cpus);

        for_each_online_cpu(i) {
                if (!cpu_isset(i, marked_cpus))
                        return;
        }

        /* All CPUs have been processed at least once,
         * we can process the mortuary once
         */
        process_task_mortuary();

        cpus_clear(marked_cpus);
}


/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal, the code switch to sb_sample_start at first kernel enter/exit
 * switch so we need a fifth state and some special handling in sync_buffer()
 */
typedef enum {
        sb_bt_ignore = -2,
        sb_buffer_start,
        sb_bt_start,
        sb_sample_start,
} sync_buffer_state;

/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * lookup in task->mm->mmap to convert EIP into dcookie/offset
 * value.
 */
void sync_buffer(int cpu)
{
        struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[cpu];
        struct mm_struct *mm = NULL;
        struct task_struct * new;
        unsigned long cookie = 0;
        int in_kernel = 1;
        unsigned int i;
        sync_buffer_state state = sb_buffer_start;
        unsigned long available;

        down(&buffer_sem);

        add_cpu_switch(cpu);

        /* Remember, only we can modify tail_pos */

        available = get_slots(cpu_buf);

        for (i = 0; i < available; ++i) {
                struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];

                if (is_code(s->eip)) {
                        if (s->event <= CPU_IS_KERNEL) {
                                /* kernel/userspace switch */
                                in_kernel = s->event;
                                if (state == sb_buffer_start)
                                        state = sb_sample_start;
                                add_kernel_ctx_switch(s->event);
                        } else if (s->event == CPU_TRACE_BEGIN) {
                                state = sb_bt_start;
                                add_trace_begin();
                        } else {
                                struct mm_struct * oldmm = mm;

                                /* userspace context switch */
                                new = (struct task_struct *)s->event;

                                release_mm(oldmm);
                                mm = take_tasks_mm(new);
                                if (mm != oldmm)
                                        cookie = get_exec_dcookie(mm);
                                add_user_ctx_switch(new, cookie);
                        }
                } else {
                        if (state >= sb_bt_start &&
                            !add_sample(mm, s, in_kernel)) {
                                if (state == sb_bt_start) {
                                        state = sb_bt_ignore;
                                        atomic_inc(&oprofile_stats.bt_lost_no_mapping);
                                }
                        }
                }

                increment_tail(cpu_buf);
        }
        release_mm(mm);

        mark_done(cpu);

        up(&buffer_sem);
}
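The cookies emitted above are opaque kernel values; userspace turns them back into pathnames with the lookup_dcookie(2) syscall, and the lookup only works while the event buffer is held open, since that open registers the reader as a dcookie user. A minimal userspace sketch of that step, illustrative rather than part of this commit, assuming a 64-bit platform where the raw syscall takes the cookie as a single argument:

/* Hypothetical daemon-side helper: resolve a dcookie from the event
 * stream to a pathname. Assumes 64-bit; error handling abbreviated. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

static void print_cookie_path(unsigned long cookie)
{
        char path[4096];
        /* valid only while /dev/oprofile/buffer is open */
        long n = syscall(SYS_lookup_dcookie, (unsigned long long)cookie,
                         path, sizeof(path));
        if (n < 0)
                perror("lookup_dcookie");
        else
                printf("cookie %#lx -> %.*s\n", cookie, (int)n, path);
}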
diff --git a/drivers/oprofile/buffer_sync.h b/drivers/oprofile/buffer_sync.h
new file mode 100644
index 000000000000..08866f6a96a3
--- /dev/null
+++ b/drivers/oprofile/buffer_sync.h
@@ -0,0 +1,22 @@
/**
 * @file buffer_sync.h
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */

#ifndef OPROFILE_BUFFER_SYNC_H
#define OPROFILE_BUFFER_SYNC_H

/* add the necessary profiling hooks */
int sync_start(void);

/* remove the hooks */
void sync_stop(void);

/* sync the given CPU's buffer */
void sync_buffer(int cpu);

#endif /* OPROFILE_BUFFER_SYNC_H */
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
new file mode 100644
index 000000000000..e9b1772a3a28
--- /dev/null
+++ b/drivers/oprofile/cpu_buffer.c
@@ -0,0 +1,307 @@
/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;

static void wq_sync_buffer(void *);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

void free_cpu_buffers(void)
{
        int i;

        for_each_online_cpu(i) {
                vfree(cpu_buffer[i].buffer);
        }
}


int alloc_cpu_buffers(void)
{
        int i;

        unsigned long buffer_size = fs_cpu_buffer_size;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer * b = &cpu_buffer[i];

                b->buffer = vmalloc(sizeof(struct op_sample) * buffer_size);
                if (!b->buffer)
                        goto fail;

                b->last_task = NULL;
                b->last_is_kernel = -1;
                b->tracing = 0;
                b->buffer_size = buffer_size;
                b->tail_pos = 0;
                b->head_pos = 0;
                b->sample_received = 0;
                b->sample_lost_overflow = 0;
                b->cpu = i;
                INIT_WORK(&b->work, wq_sync_buffer, b);
        }
        return 0;

fail:
        free_cpu_buffers();
        return -ENOMEM;
}


void start_cpu_work(void)
{
        int i;

        work_enabled = 1;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer * b = &cpu_buffer[i];

                /*
                 * Spread the work by 1 jiffy per cpu so they don't all
                 * fire at once.
                 */
                schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
        }
}


void end_cpu_work(void)
{
        int i;

        work_enabled = 0;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer * b = &cpu_buffer[i];

                cancel_delayed_work(&b->work);
        }

        flush_scheduled_work();
}


/* Resets the cpu buffer to a sane state. */
void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
{
        /* reset these to invalid values; the next sample
         * collected will populate the buffer with proper
         * values to initialize the buffer
         */
        cpu_buf->last_is_kernel = -1;
        cpu_buf->last_task = NULL;
}


/* compute number of available slots in cpu_buffer queue */
static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
{
        unsigned long head = b->head_pos;
        unsigned long tail = b->tail_pos;

        if (tail > head)
                return (tail - head) - 1;

        return tail + (b->buffer_size - head) - 1;
}


static void increment_head(struct oprofile_cpu_buffer * b)
{
        unsigned long new_head = b->head_pos + 1;

        /* Ensure anything written to the slot before we
         * increment is visible */
        wmb();

        if (new_head < b->buffer_size)
                b->head_pos = new_head;
        else
                b->head_pos = 0;
}


inline static void
add_sample(struct oprofile_cpu_buffer * cpu_buf,
        unsigned long pc, unsigned long event)
{
        struct op_sample * entry = &cpu_buf->buffer[cpu_buf->head_pos];
        entry->eip = pc;
        entry->event = event;
        increment_head(cpu_buf);
}


inline static void
add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
{
        add_sample(buffer, ESCAPE_CODE, value);
}


/* This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the CPU buffer.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes
 */
static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
        int is_kernel, unsigned long event)
{
        struct task_struct * task;

        cpu_buf->sample_received++;

        if (nr_available_slots(cpu_buf) < 3) {
                cpu_buf->sample_lost_overflow++;
                return 0;
        }

        is_kernel = !!is_kernel;

        task = current;

        /* notice a switch from user->kernel or vice versa */
        if (cpu_buf->last_is_kernel != is_kernel) {
                cpu_buf->last_is_kernel = is_kernel;
                add_code(cpu_buf, is_kernel);
        }

        /* notice a task switch */
        if (cpu_buf->last_task != task) {
                cpu_buf->last_task = task;
                add_code(cpu_buf, (unsigned long)task);
        }

        add_sample(cpu_buf, pc, event);
        return 1;
}

static int oprofile_begin_trace(struct oprofile_cpu_buffer * cpu_buf)
{
        if (nr_available_slots(cpu_buf) < 4) {
                cpu_buf->sample_lost_overflow++;
                return 0;
        }

        add_code(cpu_buf, CPU_TRACE_BEGIN);
        cpu_buf->tracing = 1;
        return 1;
}


static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
{
        cpu_buf->tracing = 0;
}


void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
        struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
        unsigned long pc = profile_pc(regs);
        int is_kernel = !user_mode(regs);

        if (!backtrace_depth) {
                log_sample(cpu_buf, pc, is_kernel, event);
                return;
        }

        if (!oprofile_begin_trace(cpu_buf))
                return;

        /* if log_sample() fails, we can't backtrace since we lost the
         * source of this event */
        if (log_sample(cpu_buf, pc, is_kernel, event))
                oprofile_ops.backtrace(regs, backtrace_depth);
        oprofile_end_trace(cpu_buf);
}


void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
        struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
        log_sample(cpu_buf, pc, is_kernel, event);
}


void oprofile_add_trace(unsigned long pc)
{
        struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];

        if (!cpu_buf->tracing)
                return;

        if (nr_available_slots(cpu_buf) < 1) {
                cpu_buf->tracing = 0;
                cpu_buf->sample_lost_overflow++;
                return;
        }

        /* broken frame can give an eip with the same value as an escape code,
         * abort the trace if we get it */
        if (pc == ESCAPE_CODE) {
                cpu_buf->tracing = 0;
                cpu_buf->backtrace_aborted++;
                return;
        }

        add_sample(cpu_buf, pc, 0);
}


/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu
 */
static void wq_sync_buffer(void * data)
{
        struct oprofile_cpu_buffer * b = data;
        if (b->cpu != smp_processor_id()) {
                printk("WQ on CPU%d, prefer CPU%d\n",
                        smp_processor_id(), b->cpu);
        }
        sync_buffer(b->cpu);

        /* don't re-add the work if we're shutting down */
        if (work_enabled)
                schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}
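The head/tail arithmetic in nr_available_slots() above (and its mirror get_slots() in buffer_sync.c) is a classic single-producer/single-consumer ring that sacrifices one slot so that head == tail always means "empty". A standalone, runnable sketch of the same accounting, with illustrative names that are not from the driver:

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 8UL

static unsigned long head, tail;  /* writer advances head, reader tail */

/* same formula as nr_available_slots(): one slot stays empty */
static unsigned long slots_free(void)
{
        if (tail > head)
                return (tail - head) - 1;
        return tail + (RING_SIZE - head) - 1;
}

/* same formula as get_slots(): entries ready for the reader */
static unsigned long slots_used(void)
{
        if (head >= tail)
                return head - tail;
        return head + (RING_SIZE - tail);
}

int main(void)
{
        int i;

        for (i = 0; i < 5; i++)               /* produce 5 entries */
                head = (head + 1) % RING_SIZE;
        for (i = 0; i < 2; i++)               /* consume 2 entries */
                tail = (tail + 1) % RING_SIZE;

        assert(slots_used() == 3);
        assert(slots_used() + slots_free() == RING_SIZE - 1);
        printf("used=%lu free=%lu\n", slots_used(), slots_free());
        return 0;
}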
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
new file mode 100644
index 000000000000..09abb80e0570
--- /dev/null
+++ b/drivers/oprofile/cpu_buffer.h
@@ -0,0 +1,57 @@
/**
 * @file cpu_buffer.h
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */

#ifndef OPROFILE_CPU_BUFFER_H
#define OPROFILE_CPU_BUFFER_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/cache.h>

struct task_struct;

int alloc_cpu_buffers(void);
void free_cpu_buffers(void);

void start_cpu_work(void);
void end_cpu_work(void);

/* CPU buffer is composed of such entries (which are
 * also used for context switch notes)
 */
struct op_sample {
        unsigned long eip;
        unsigned long event;
};

struct oprofile_cpu_buffer {
        volatile unsigned long head_pos;
        volatile unsigned long tail_pos;
        unsigned long buffer_size;
        struct task_struct * last_task;
        int last_is_kernel;
        int tracing;
        struct op_sample * buffer;
        unsigned long sample_received;
        unsigned long sample_lost_overflow;
        unsigned long backtrace_aborted;
        int cpu;
        struct work_struct work;
} ____cacheline_aligned;

extern struct oprofile_cpu_buffer cpu_buffer[];

void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);

/* transient events for the CPU buffer -> event buffer */
#define CPU_IS_KERNEL 1
#define CPU_TRACE_BEGIN 2

#endif /* OPROFILE_CPU_BUFFER_H */
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
new file mode 100644
index 000000000000..166bca790133
--- /dev/null
+++ b/drivers/oprofile/event_buffer.c
@@ -0,0 +1,187 @@
/**
 * @file event_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * This is the global event buffer that the user-space
 * daemon reads from. The event buffer is an untyped array
 * of unsigned longs. Entries are prefixed by the
 * escape value ESCAPE_CODE followed by an identifying code.
 */

#include <linux/vmalloc.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/dcookies.h>
#include <linux/fs.h>
#include <asm/uaccess.h>

#include "oprof.h"
#include "event_buffer.h"
#include "oprofile_stats.h"

DECLARE_MUTEX(buffer_sem);

static unsigned long buffer_opened;
static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
static unsigned long * event_buffer;
static unsigned long buffer_size;
static unsigned long buffer_watershed;
static size_t buffer_pos;
/* atomic_t because wait_event checks it outside of buffer_sem */
static atomic_t buffer_ready = ATOMIC_INIT(0);

/* Add an entry to the event buffer. When we
 * get near to the end we wake up the process
 * sleeping on the read() of the file.
 */
void add_event_entry(unsigned long value)
{
        if (buffer_pos == buffer_size) {
                atomic_inc(&oprofile_stats.event_lost_overflow);
                return;
        }

        event_buffer[buffer_pos] = value;
        if (++buffer_pos == buffer_size - buffer_watershed) {
                atomic_set(&buffer_ready, 1);
                wake_up(&buffer_wait);
        }
}


/* Wake up the waiting process if any. This happens
 * on "echo 0 >/dev/oprofile/enable" so the daemon
 * processes the data remaining in the event buffer.
 */
void wake_up_buffer_waiter(void)
{
        down(&buffer_sem);
        atomic_set(&buffer_ready, 1);
        wake_up(&buffer_wait);
        up(&buffer_sem);
}


int alloc_event_buffer(void)
{
        int err = -ENOMEM;

        spin_lock(&oprofilefs_lock);
        buffer_size = fs_buffer_size;
        buffer_watershed = fs_buffer_watershed;
        spin_unlock(&oprofilefs_lock);

        if (buffer_watershed >= buffer_size)
                return -EINVAL;

        event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
        if (!event_buffer)
                goto out;

        err = 0;
out:
        return err;
}


void free_event_buffer(void)
{
        vfree(event_buffer);
}


static int event_buffer_open(struct inode * inode, struct file * file)
{
        int err = -EPERM;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (test_and_set_bit(0, &buffer_opened))
                return -EBUSY;

        /* Register as a user of dcookies
         * to ensure they persist for the lifetime of
         * the open event file
         */
        err = -EINVAL;
        file->private_data = dcookie_register();
        if (!file->private_data)
                goto out;

        if ((err = oprofile_setup()))
                goto fail;

        /* NB: the actual start happens from userspace
         * echo 1 >/dev/oprofile/enable
         */

        return 0;

fail:
        dcookie_unregister(file->private_data);
out:
        clear_bit(0, &buffer_opened);
        return err;
}


static int event_buffer_release(struct inode * inode, struct file * file)
{
        oprofile_stop();
        oprofile_shutdown();
        dcookie_unregister(file->private_data);
        buffer_pos = 0;
        atomic_set(&buffer_ready, 0);
        clear_bit(0, &buffer_opened);
        return 0;
}


static ssize_t event_buffer_read(struct file * file, char __user * buf,
        size_t count, loff_t * offset)
{
        int retval = -EINVAL;
        size_t const max = buffer_size * sizeof(unsigned long);

        /* handling partial reads is more trouble than it's worth */
        if (count != max || *offset)
                return -EINVAL;

        wait_event_interruptible(buffer_wait, atomic_read(&buffer_ready));

        if (signal_pending(current))
                return -EINTR;

        /* can't currently happen */
        if (!atomic_read(&buffer_ready))
                return -EAGAIN;

        down(&buffer_sem);

        atomic_set(&buffer_ready, 0);

        retval = -EFAULT;

        count = buffer_pos * sizeof(unsigned long);

        if (copy_to_user(buf, event_buffer, count))
                goto out;

        retval = count;
        buffer_pos = 0;

out:
        up(&buffer_sem);
        return retval;
}

struct file_operations event_buffer_fops = {
        .open           = event_buffer_open,
        .release        = event_buffer_release,
        .read           = event_buffer_read,
};
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
new file mode 100644
index 000000000000..442aaad391e0
--- /dev/null
+++ b/drivers/oprofile/event_buffer.h
@@ -0,0 +1,48 @@
/**
 * @file event_buffer.h
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */

#ifndef EVENT_BUFFER_H
#define EVENT_BUFFER_H

#include <linux/types.h>
#include <asm/semaphore.h>

int alloc_event_buffer(void);

void free_event_buffer(void);

/* wake up the process sleeping on the event file */
void wake_up_buffer_waiter(void);

/* Each escaped entry is prefixed by ESCAPE_CODE
 * then one of the following codes, then the
 * relevant data.
 */
#define ESCAPE_CODE              ~0UL
#define CTX_SWITCH_CODE          1
#define CPU_SWITCH_CODE          2
#define COOKIE_SWITCH_CODE       3
#define KERNEL_ENTER_SWITCH_CODE 4
#define KERNEL_EXIT_SWITCH_CODE  5
#define MODULE_LOADED_CODE       6
#define CTX_TGID_CODE            7
#define TRACE_BEGIN_CODE         8
#define TRACE_END_CODE           9

/* add data to the event buffer */
void add_event_entry(unsigned long data);

extern struct file_operations event_buffer_fops;

/* mutex between sync_cpu_buffers() and the
 * file reading code.
 */
extern struct semaphore buffer_sem;

#endif /* EVENT_BUFFER_H */
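Putting the two pieces together: the global buffer is a flat stream of words in which ESCAPE_CODE introduces a control record and everything else arrives as (pc-or-offset, count) sample pairs. A hedged sketch of a consumer-side decoder follows; the framing mirrors the add_*() helpers in buffer_sync.c, but the decode() function itself is hypothetical:

#include <stdio.h>
#include <stddef.h>

#define ESCAPE_CODE        (~0UL)
#define CTX_SWITCH_CODE    1
#define CPU_SWITCH_CODE    2
#define COOKIE_SWITCH_CODE 3
#define CTX_TGID_CODE      7

static void decode(const unsigned long *buf, size_t nwords)
{
        size_t i = 0;

        while (i + 1 < nwords) {
                if (buf[i] != ESCAPE_CODE) {
                        /* plain sample: offset/eip word, then event count */
                        printf("sample %#lx count %lu\n", buf[i], buf[i + 1]);
                        i += 2;
                        continue;
                }
                if (i + 2 >= nwords)
                        break;                  /* truncated control record */
                switch (buf[i + 1]) {
                case CPU_SWITCH_CODE:           /* payload: cpu number */
                        printf("cpu -> %lu\n", buf[i + 2]);
                        i += 3;
                        break;
                case COOKIE_SWITCH_CODE:        /* payload: image dcookie */
                        printf("image cookie %#lx\n", buf[i + 2]);
                        i += 3;
                        break;
                case CTX_TGID_CODE:             /* payload: tgid */
                        i += 3;
                        break;
                case CTX_SWITCH_CODE:           /* payload: pid, app cookie */
                        printf("task pid %lu\n", buf[i + 2]);
                        i += 4;
                        break;
                default:                        /* remaining codes carry no payload */
                        i += 2;
                        break;
                }
        }
}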
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
new file mode 100644
index 000000000000..b3f1cd6a24c1
--- /dev/null
+++ b/drivers/oprofile/oprof.c
@@ -0,0 +1,188 @@
/**
 * @file oprof.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/oprofile.h>
#include <linux/moduleparam.h>
#include <asm/semaphore.h>

#include "oprof.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprofile_stats.h"

struct oprofile_operations oprofile_ops;

unsigned long oprofile_started;
unsigned long backtrace_depth;
static unsigned long is_setup;
static DECLARE_MUTEX(start_sem);

/* timer
   0 - use performance monitoring hardware if available
   1 - use the timer int mechanism regardless
 */
static int timer = 0;

int oprofile_setup(void)
{
        int err;

        down(&start_sem);

        if ((err = alloc_cpu_buffers()))
                goto out;

        if ((err = alloc_event_buffer()))
                goto out1;

        if (oprofile_ops.setup && (err = oprofile_ops.setup()))
                goto out2;

        /* Note even though this starts part of the
         * profiling overhead, it's necessary to prevent
         * us missing task deaths and eventually oopsing
         * when trying to process the event buffer.
         */
        if ((err = sync_start()))
                goto out3;

        is_setup = 1;
        up(&start_sem);
        return 0;

out3:
        if (oprofile_ops.shutdown)
                oprofile_ops.shutdown();
out2:
        free_event_buffer();
out1:
        free_cpu_buffers();
out:
        up(&start_sem);
        return err;
}


/* Actually start profiling (echo 1>/dev/oprofile/enable) */
int oprofile_start(void)
{
        int err = -EINVAL;

        down(&start_sem);

        if (!is_setup)
                goto out;

        err = 0;

        if (oprofile_started)
                goto out;

        oprofile_reset_stats();

        if ((err = oprofile_ops.start()))
                goto out;

        oprofile_started = 1;
out:
        up(&start_sem);
        return err;
}


/* echo 0>/dev/oprofile/enable */
void oprofile_stop(void)
{
        down(&start_sem);
        if (!oprofile_started)
                goto out;
        oprofile_ops.stop();
        oprofile_started = 0;
        /* wake up the daemon to read what remains */
        wake_up_buffer_waiter();
out:
        up(&start_sem);
}


void oprofile_shutdown(void)
{
        down(&start_sem);
        sync_stop();
        if (oprofile_ops.shutdown)
                oprofile_ops.shutdown();
        is_setup = 0;
        free_event_buffer();
        free_cpu_buffers();
        up(&start_sem);
}


int oprofile_set_backtrace(unsigned long val)
{
        int err = 0;

        down(&start_sem);

        if (oprofile_started) {
                err = -EBUSY;
                goto out;
        }

        if (!oprofile_ops.backtrace) {
                err = -EINVAL;
                goto out;
        }

        backtrace_depth = val;

out:
        up(&start_sem);
        return err;
}

static int __init oprofile_init(void)
{
        int err;

        err = oprofile_arch_init(&oprofile_ops);

        if (err < 0 || timer) {
                printk(KERN_INFO "oprofile: using timer interrupt.\n");
                oprofile_timer_init(&oprofile_ops);
        }

        err = oprofilefs_register();
        if (err)
                oprofile_arch_exit();

        return err;
}


static void __exit oprofile_exit(void)
{
        oprofilefs_unregister();
        oprofile_arch_exit();
}


module_init(oprofile_init);
module_exit(oprofile_exit);

module_param_named(timer, timer, int, 0644);
MODULE_PARM_DESC(timer, "force use of timer interrupt");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Levon <levon@movementarian.org>");
MODULE_DESCRIPTION("OProfile system profiler");
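The lifecycle above maps one-to-one onto file operations: opening the event buffer runs oprofile_setup(), writes to the enable file drive oprofile_start()/oprofile_stop(), and the final release runs oprofile_shutdown(). A hedged userspace sketch of the control sequence, assuming oprofilefs is mounted at /dev/oprofile and root privileges (error handling abbreviated):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* open("buffer") triggers oprofile_setup() in the kernel */
        int buf_fd = open("/dev/oprofile/buffer", O_RDONLY);
        int en_fd  = open("/dev/oprofile/enable", O_WRONLY);

        if (buf_fd < 0 || en_fd < 0) {
                perror("open /dev/oprofile");
                return 1;
        }

        write(en_fd, "1", 1);   /* -> oprofile_start() */
        sleep(10);              /* profiling runs; a real daemon would be
                                 * read()ing the buffer in this window, and
                                 * each read must request exactly
                                 * buffer_size * sizeof(unsigned long) bytes */
        write(en_fd, "0", 1);   /* -> oprofile_stop(), wakes the reader */

        close(en_fd);
        close(buf_fd);          /* release -> oprofile_shutdown() */
        return 0;
}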
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
new file mode 100644
index 000000000000..18323650806e
--- /dev/null
+++ b/drivers/oprofile/oprof.h
@@ -0,0 +1,39 @@
/**
 * @file oprof.h
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */

#ifndef OPROF_H
#define OPROF_H

int oprofile_setup(void);
void oprofile_shutdown(void);

int oprofilefs_register(void);
void oprofilefs_unregister(void);

int oprofile_start(void);
void oprofile_stop(void);

struct oprofile_operations;

extern unsigned long fs_buffer_size;
extern unsigned long fs_cpu_buffer_size;
extern unsigned long fs_buffer_watershed;
extern struct oprofile_operations oprofile_ops;
extern unsigned long oprofile_started;
extern unsigned long backtrace_depth;

struct super_block;
struct dentry;

void oprofile_create_files(struct super_block * sb, struct dentry * root);
void oprofile_timer_init(struct oprofile_operations * ops);

int oprofile_set_backtrace(unsigned long depth);

#endif /* OPROF_H */
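oprof.c expects the architecture to fill in oprofile_ops via oprofile_arch_init(). A speculative sketch of the minimum an arch driver provides; the field names are inferred from their uses in oprof.c and oprofile_files.c, and the my_* functions are placeholders:

#include <linux/oprofile.h>
#include <linux/errno.h>

/* placeholder hooks: program the PMU and enable/disable its interrupt */
static int my_pmu_start(void) { return 0; }
static void my_pmu_stop(void) { }

int oprofile_arch_init(struct oprofile_operations *ops)
{
        ops->start    = my_pmu_start;   /* called by oprofile_start() */
        ops->stop     = my_pmu_stop;    /* called by oprofile_stop() */
        ops->cpu_type = "my/arch";      /* read back via /dev/oprofile/cpu_type */
        /* .setup/.shutdown/.backtrace/.create_files are optional hooks */
        return 0;
}

void oprofile_arch_exit(void)
{
        /* release any PMU resources claimed in oprofile_arch_init() */
}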
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
new file mode 100644
index 000000000000..9abedeaa567c
--- /dev/null
+++ b/drivers/oprofile/oprofile_files.c
@@ -0,0 +1,135 @@
/**
 * @file oprofile_files.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */

#include <linux/fs.h>
#include <linux/oprofile.h>

#include "event_buffer.h"
#include "oprofile_stats.h"
#include "oprof.h"

unsigned long fs_buffer_size = 131072;
unsigned long fs_cpu_buffer_size = 8192;
unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */

static ssize_t depth_read(struct file * file, char * buf, size_t count, loff_t * offset)
{
        return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset);
}


static ssize_t depth_write(struct file * file, char const * buf, size_t count, loff_t * offset)
{
        unsigned long val;
        int retval;

        if (*offset)
                return -EINVAL;

        retval = oprofilefs_ulong_from_user(&val, buf, count);
        if (retval)
                return retval;

        retval = oprofile_set_backtrace(val);

        if (retval)
                return retval;
        return count;
}


static struct file_operations depth_fops = {
        .read           = depth_read,
        .write          = depth_write
};


static ssize_t pointer_size_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
{
        return oprofilefs_ulong_to_user(sizeof(void *), buf, count, offset);
}


static struct file_operations pointer_size_fops = {
        .read           = pointer_size_read,
};


static ssize_t cpu_type_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
{
        return oprofilefs_str_to_user(oprofile_ops.cpu_type, buf, count, offset);
}


static struct file_operations cpu_type_fops = {
        .read           = cpu_type_read,
};


static ssize_t enable_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
{
        return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
}


static ssize_t enable_write(struct file * file, char const __user * buf, size_t count, loff_t * offset)
{
        unsigned long val;
        int retval;

        if (*offset)
                return -EINVAL;

        retval = oprofilefs_ulong_from_user(&val, buf, count);
        if (retval)
                return retval;

        if (val)
                retval = oprofile_start();
        else
                oprofile_stop();

        if (retval)
                return retval;
        return count;
}


static struct file_operations enable_fops = {
        .read           = enable_read,
        .write          = enable_write,
};


static ssize_t dump_write(struct file * file, char const __user * buf, size_t count, loff_t * offset)
{
        wake_up_buffer_waiter();
        return count;
}


static struct file_operations dump_fops = {
        .write          = dump_write,
};

void oprofile_create_files(struct super_block * sb, struct dentry * root)
{
        oprofilefs_create_file(sb, root, "enable", &enable_fops);
        oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
        oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
        oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
        oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
        oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size);
        oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
        oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
        oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
        oprofile_create_stats_files(sb, root);
        if (oprofile_ops.create_files)
                oprofile_ops.create_files(sb, root);
}
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
new file mode 100644
index 000000000000..e94b1e4a2a84
--- /dev/null
+++ b/drivers/oprofile/oprofile_stats.c
@@ -0,0 +1,74 @@
/**
 * @file oprofile_stats.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 */

#include <linux/oprofile.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/threads.h>

#include "oprofile_stats.h"
#include "cpu_buffer.h"

struct oprofile_stat_struct oprofile_stats;

void oprofile_reset_stats(void)
{
        struct oprofile_cpu_buffer * cpu_buf;
        int i;

        for_each_cpu(i) {
                cpu_buf = &cpu_buffer[i];
                cpu_buf->sample_received = 0;
                cpu_buf->sample_lost_overflow = 0;
        }

        atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
        atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
        atomic_set(&oprofile_stats.event_lost_overflow, 0);
}


void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
{
        struct oprofile_cpu_buffer * cpu_buf;
        struct dentry * cpudir;
        struct dentry * dir;
        char buf[10];
        int i;

        dir = oprofilefs_mkdir(sb, root, "stats");
        if (!dir)
                return;

        for_each_cpu(i) {
                cpu_buf = &cpu_buffer[i];
                snprintf(buf, 10, "cpu%d", i);
                cpudir = oprofilefs_mkdir(sb, dir, buf);

                /* Strictly speaking access to these ulongs is racy,
                 * but we can't simply lock them, and they are
                 * informational only.
                 */
                oprofilefs_create_ro_ulong(sb, cpudir, "sample_received",
                        &cpu_buf->sample_received);
                oprofilefs_create_ro_ulong(sb, cpudir, "sample_lost_overflow",
                        &cpu_buf->sample_lost_overflow);
                oprofilefs_create_ro_ulong(sb, cpudir, "backtrace_aborted",
                        &cpu_buf->backtrace_aborted);
        }

        oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm",
                &oprofile_stats.sample_lost_no_mm);
        oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mapping",
                &oprofile_stats.sample_lost_no_mapping);
        oprofilefs_create_ro_atomic(sb, dir, "event_lost_overflow",
                &oprofile_stats.event_lost_overflow);
        oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping",
                &oprofile_stats.bt_lost_no_mapping);
}
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
new file mode 100644
index 000000000000..6d755a633f15
--- /dev/null
+++ b/drivers/oprofile/oprofile_stats.h
@@ -0,0 +1,33 @@
/**
 * @file oprofile_stats.h
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 */

#ifndef OPROFILE_STATS_H
#define OPROFILE_STATS_H

#include <asm/atomic.h>

struct oprofile_stat_struct {
        atomic_t sample_lost_no_mm;
        atomic_t sample_lost_no_mapping;
        atomic_t bt_lost_no_mapping;
        atomic_t event_lost_overflow;
};

extern struct oprofile_stat_struct oprofile_stats;

/* reset all stats to zero */
void oprofile_reset_stats(void);

struct super_block;
struct dentry;

/* create the stats/ dir */
void oprofile_create_stats_files(struct super_block * sb, struct dentry * root);

#endif /* OPROFILE_STATS_H */
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
new file mode 100644
index 000000000000..d6bae699749a
--- /dev/null
+++ b/drivers/oprofile/oprofilefs.c
@@ -0,0 +1,299 @@
1 | /** | ||
2 | * @file oprofilefs.c | ||
3 | * | ||
4 | * @remark Copyright 2002 OProfile authors | ||
5 | * @remark Read the file COPYING | ||
6 | * | ||
7 | * @author John Levon | ||
8 | * | ||
9 | * A simple filesystem for configuration and | ||
10 | * access of oprofile. | ||
11 | */ | ||
12 | |||
13 | #include <linux/init.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/oprofile.h> | ||
16 | #include <linux/fs.h> | ||
17 | #include <linux/pagemap.h> | ||
18 | #include <asm/uaccess.h> | ||
19 | |||
20 | #include "oprof.h" | ||
21 | |||
22 | #define OPROFILEFS_MAGIC 0x6f70726f | ||
23 | |||
24 | DEFINE_SPINLOCK(oprofilefs_lock); | ||
25 | |||
26 | static struct inode * oprofilefs_get_inode(struct super_block * sb, int mode) | ||
27 | { | ||
28 | struct inode * inode = new_inode(sb); | ||
29 | |||
30 | if (inode) { | ||
31 | inode->i_mode = mode; | ||
32 | inode->i_uid = 0; | ||
33 | inode->i_gid = 0; | ||
34 | inode->i_blksize = PAGE_CACHE_SIZE; | ||
35 | inode->i_blocks = 0; | ||
36 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | ||
37 | } | ||
38 | return inode; | ||
39 | } | ||
40 | |||
41 | |||
42 | static struct super_operations s_ops = { | ||
43 | .statfs = simple_statfs, | ||
44 | .drop_inode = generic_delete_inode, | ||
45 | }; | ||
46 | |||
47 | |||
48 | ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset) | ||
49 | { | ||
50 | return simple_read_from_buffer(buf, count, offset, str, strlen(str)); | ||
51 | } | ||
52 | |||
53 | |||
54 | #define TMPBUFSIZE 50 | ||
55 | |||
56 | ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset) | ||
57 | { | ||
58 | char tmpbuf[TMPBUFSIZE]; | ||
59 | size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val); | ||
60 | if (maxlen > TMPBUFSIZE - 1) | ||
61 | maxlen = TMPBUFSIZE - 1; | ||
62 | return simple_read_from_buffer(buf, count, offset, tmpbuf, maxlen); | ||
63 | } | ||
64 | |||
65 | |||
66 | int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count) | ||
67 | { | ||
68 | char tmpbuf[TMPBUFSIZE]; | ||
69 | |||
70 | if (!count) | ||
71 | return 0; | ||
72 | |||
73 | if (count > TMPBUFSIZE - 1) | ||
74 | return -EINVAL; | ||
75 | |||
76 | memset(tmpbuf, 0x0, TMPBUFSIZE); | ||
77 | |||
78 | if (copy_from_user(tmpbuf, buf, count)) | ||
79 | return -EFAULT; | ||
80 | |||
81 | spin_lock(&oprofilefs_lock); | ||
82 | *val = simple_strtoul(tmpbuf, NULL, 0); | ||
83 | spin_unlock(&oprofilefs_lock); | ||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | |||
88 | static ssize_t ulong_read_file(struct file * file, char __user * buf, size_t count, loff_t * offset) | ||
89 | { | ||
90 | unsigned long * val = file->private_data; | ||
91 | return oprofilefs_ulong_to_user(*val, buf, count, offset); | ||
92 | } | ||
93 | |||
94 | |||
95 | static ssize_t ulong_write_file(struct file * file, char const __user * buf, size_t count, loff_t * offset) | ||
96 | { | ||
97 | unsigned long * value = file->private_data; | ||
98 | int retval; | ||
99 | |||
100 | if (*offset) | ||
101 | return -EINVAL; | ||
102 | |||
103 | retval = oprofilefs_ulong_from_user(value, buf, count); | ||
104 | |||
105 | if (retval) | ||
106 | return retval; | ||
107 | return count; | ||
108 | } | ||
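Taken together, ulong_read_file() and ulong_write_file() give every writable ulong file the same contract: a read returns the decimal value plus a newline, and a write must start at offset 0 and may use any base that simple_strtoul() accepts (decimal, 0x-prefixed hex, or 0-prefixed octal, since the base argument is 0). A minimal user-space sketch of driving such a file; the path is illustrative and assumes oprofilefs is mounted at /dev/oprofile:

/* Hypothetical user-space helper; "enable" is one such ulong file. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_control(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));	/* offset 0: fresh open */
	close(fd);
	return n < 0 ? -1 : 0;
}

	/* e.g. set_control("/dev/oprofile/enable", "1"); */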
109 | |||
110 | |||
111 | static int default_open(struct inode * inode, struct file * filp) | ||
112 | { | ||
113 | if (inode->u.generic_ip) | ||
114 | filp->private_data = inode->u.generic_ip; | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | |||
119 | static struct file_operations ulong_fops = { | ||
120 | .read = ulong_read_file, | ||
121 | .write = ulong_write_file, | ||
122 | .open = default_open, | ||
123 | }; | ||
124 | |||
125 | |||
126 | static struct file_operations ulong_ro_fops = { | ||
127 | .read = ulong_read_file, | ||
128 | .open = default_open, | ||
129 | }; | ||
130 | |||
131 | |||
132 | static struct dentry * __oprofilefs_create_file(struct super_block * sb, | ||
133 | struct dentry * root, char const * name, struct file_operations * fops, | ||
134 | int perm) | ||
135 | { | ||
136 | struct dentry * dentry; | ||
137 | struct inode * inode; | ||
138 | |||
139 | dentry = d_alloc_name(root, name); | ||
140 | if (!dentry) | ||
141 | return NULL; | ||
142 | inode = oprofilefs_get_inode(sb, S_IFREG | perm); | ||
143 | if (!inode) { | ||
144 | dput(dentry); | ||
145 | return NULL; | ||
146 | } | ||
147 | inode->i_fop = fops; | ||
148 | d_add(dentry, inode); | ||
149 | return dentry; | ||
150 | } | ||
151 | |||
152 | |||
153 | int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root, | ||
154 | char const * name, unsigned long * val) | ||
155 | { | ||
156 | struct dentry * d = __oprofilefs_create_file(sb, root, name, | ||
157 | &ulong_fops, 0644); | ||
158 | if (!d) | ||
159 | return -EFAULT; | ||
160 | |||
161 | d->d_inode->u.generic_ip = val; | ||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | |||
166 | int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root, | ||
167 | char const * name, unsigned long * val) | ||
168 | { | ||
169 | struct dentry * d = __oprofilefs_create_file(sb, root, name, | ||
170 | &ulong_ro_fops, 0444); | ||
171 | if (!d) | ||
172 | return -EFAULT; | ||
173 | |||
174 | d->d_inode->u.generic_ip = val; | ||
175 | return 0; | ||
176 | } | ||
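A sketch of how a profiler backend might use these constructors from its create_files hook; the directory and variable names here are hypothetical (the real callers are oprofile_files.c and the arch-specific drivers):

static unsigned long example_depth;	/* writable tunable */
static unsigned long example_hits;	/* read-only counter */

static int example_create_files(struct super_block * sb, struct dentry * root)
{
	struct dentry * dir = oprofilefs_mkdir(sb, root, "example");
	if (!dir)
		return -EFAULT;
	if (oprofilefs_create_ulong(sb, dir, "depth", &example_depth))
		return -EFAULT;
	return oprofilefs_create_ro_ulong(sb, dir, "hits", &example_hits);
}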
177 | |||
178 | |||
179 | static ssize_t atomic_read_file(struct file * file, char __user * buf, size_t count, loff_t * offset) | ||
180 | { | ||
181 | atomic_t * val = file->private_data; | ||
182 | return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset); | ||
183 | } | ||
184 | |||
185 | |||
186 | static struct file_operations atomic_ro_fops = { | ||
187 | .read = atomic_read_file, | ||
188 | .open = default_open, | ||
189 | }; | ||
190 | |||
191 | |||
192 | int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root, | ||
193 | char const * name, atomic_t * val) | ||
194 | { | ||
195 | struct dentry * d = __oprofilefs_create_file(sb, root, name, | ||
196 | &atomic_ro_fops, 0444); | ||
197 | if (!d) | ||
198 | return -EFAULT; | ||
199 | |||
200 | d->d_inode->u.generic_ip = val; | ||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | |||
205 | int oprofilefs_create_file(struct super_block * sb, struct dentry * root, | ||
206 | char const * name, struct file_operations * fops) | ||
207 | { | ||
208 | if (!__oprofilefs_create_file(sb, root, name, fops, 0644)) | ||
209 | return -EFAULT; | ||
210 | return 0; | ||
211 | } | ||
212 | |||
213 | |||
214 | int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root, | ||
215 | char const * name, struct file_operations * fops, int perm) | ||
216 | { | ||
217 | if (!__oprofilefs_create_file(sb, root, name, fops, perm)) | ||
218 | return -EFAULT; | ||
219 | return 0; | ||
220 | } | ||
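Files whose contents are not a plain ulong or atomic_t supply their own file_operations through the two entry points above. A minimal read-only sketch (the file name and string are hypothetical), reusing oprofilefs_str_to_user() for the read side:

static ssize_t banner_read(struct file * file, char __user * buf,
		size_t count, loff_t * offset)
{
	return oprofilefs_str_to_user("example 1.0\n", buf, count, offset);
}

static struct file_operations banner_fops = {
	.read = banner_read,
};

	/* from a create_files callback: */
	/* oprofilefs_create_file_perm(sb, root, "banner", &banner_fops, 0444); */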
221 | |||
222 | |||
223 | struct dentry * oprofilefs_mkdir(struct super_block * sb, | ||
224 | struct dentry * root, char const * name) | ||
225 | { | ||
226 | struct dentry * dentry; | ||
227 | struct inode * inode; | ||
228 | |||
229 | dentry = d_alloc_name(root, name); | ||
230 | if (!dentry) | ||
231 | return NULL; | ||
232 | inode = oprofilefs_get_inode(sb, S_IFDIR | 0755); | ||
233 | if (!inode) { | ||
234 | dput(dentry); | ||
235 | return NULL; | ||
236 | } | ||
237 | inode->i_op = &simple_dir_inode_operations; | ||
238 | inode->i_fop = &simple_dir_operations; | ||
239 | d_add(dentry, inode); | ||
240 | return dentry; | ||
241 | } | ||
242 | |||
243 | |||
244 | static int oprofilefs_fill_super(struct super_block * sb, void * data, int silent) | ||
245 | { | ||
246 | struct inode * root_inode; | ||
247 | struct dentry * root_dentry; | ||
248 | |||
249 | sb->s_blocksize = PAGE_CACHE_SIZE; | ||
250 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; | ||
251 | sb->s_magic = OPROFILEFS_MAGIC; | ||
252 | sb->s_op = &s_ops; | ||
253 | sb->s_time_gran = 1; | ||
254 | |||
255 | root_inode = oprofilefs_get_inode(sb, S_IFDIR | 0755); | ||
256 | if (!root_inode) | ||
257 | return -ENOMEM; | ||
258 | root_inode->i_op = &simple_dir_inode_operations; | ||
259 | root_inode->i_fop = &simple_dir_operations; | ||
260 | root_dentry = d_alloc_root(root_inode); | ||
261 | if (!root_dentry) { | ||
262 | iput(root_inode); | ||
263 | return -ENOMEM; | ||
264 | } | ||
265 | |||
266 | sb->s_root = root_dentry; | ||
267 | |||
268 | oprofile_create_files(sb, root_dentry); | ||
269 | |||
270 | // FIXME: verify kill_litter_super removes our dentries | ||
271 | return 0; | ||
272 | } | ||
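On the FIXME above: kill_litter_super() exists for precisely this kind of pseudo-filesystem; it runs d_genocide() over the tree to drop the reference that d_add() left on each dentry before tearing down the superblock, so the dentries created here should indeed be released at unmount.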
273 | |||
274 | |||
275 | static struct super_block *oprofilefs_get_sb(struct file_system_type *fs_type, | ||
276 | int flags, const char *dev_name, void *data) | ||
277 | { | ||
278 | return get_sb_single(fs_type, flags, data, oprofilefs_fill_super); | ||
279 | } | ||
280 | |||
281 | |||
282 | static struct file_system_type oprofilefs_type = { | ||
283 | .owner = THIS_MODULE, | ||
284 | .name = "oprofilefs", | ||
285 | .get_sb = oprofilefs_get_sb, | ||
286 | .kill_sb = kill_litter_super, | ||
287 | }; | ||
288 | |||
289 | |||
290 | int __init oprofilefs_register(void) | ||
291 | { | ||
292 | return register_filesystem(&oprofilefs_type); | ||
293 | } | ||
294 | |||
295 | |||
296 | void __exit oprofilefs_unregister(void) | ||
297 | { | ||
298 | unregister_filesystem(&oprofilefs_type); | ||
299 | } | ||
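With the filesystem type registered, user space mounts it in the usual way, conventionally at /dev/oprofile:

	mount -t oprofilefs nodev /dev/oprofile

Because the superblock comes from get_sb_single(), every mount shares the one instance, so the control files are global rather than per-mount.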
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
new file mode 100644
index 000000000000..710a45f0d734
--- /dev/null
+++ b/drivers/oprofile/timer_int.c
@@ -0,0 +1,46 @@ | |||
1 | /** | ||
2 | * @file timer_int.c | ||
3 | * | ||
4 | * @remark Copyright 2002 OProfile authors | ||
5 | * @remark Read the file COPYING | ||
6 | * | ||
7 | * @author John Levon <levon@movementarian.org> | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/notifier.h> | ||
12 | #include <linux/smp.h> | ||
13 | #include <linux/oprofile.h> | ||
14 | #include <linux/profile.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <asm/ptrace.h> | ||
17 | |||
18 | #include "oprof.h" | ||
19 | |||
20 | static int timer_notify(struct pt_regs *regs) | ||
21 | { | ||
22 | oprofile_add_sample(regs, 0); | ||
23 | return 0; | ||
24 | } | ||
25 | |||
26 | static int timer_start(void) | ||
27 | { | ||
28 | return register_timer_hook(timer_notify); | ||
29 | } | ||
30 | |||
31 | |||
32 | static void timer_stop(void) | ||
33 | { | ||
34 | unregister_timer_hook(timer_notify); | ||
35 | } | ||
36 | |||
37 | |||
38 | void __init oprofile_timer_init(struct oprofile_operations * ops) | ||
39 | { | ||
40 | ops->create_files = NULL; | ||
41 | ops->setup = NULL; | ||
42 | ops->shutdown = NULL; | ||
43 | ops->start = timer_start; | ||
44 | ops->stop = timer_stop; | ||
45 | ops->cpu_type = "timer"; | ||
46 | } | ||
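Every hook except start/stop is cleared because the timer backend needs no configuration files or setup phase; oprofile_add_sample() is simply fed from the timer tick. A hedged sketch of the intended fallback in the core (see oprof.c for the real logic; the exact condition shown is an assumption here):

	/* if no hardware backend comes up, fall back to timer mode */
	if (oprofile_arch_init(&oprofile_ops) < 0)
		oprofile_timer_init(&oprofile_ops);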